/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2023 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <dev_driver.h>
#include <cryptodev_pmd.h>
#include <rte_common.h>
#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>
#include <rte_hexdump.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>
#include <desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID 0x1957
#define FSL_DEVICE_ID 0x410
#define FSL_SUBSYSTEM_SEC 1
#define FSL_MC_DPSECI_DEVID 3

#define NO_PREFETCH 0

#define DRIVER_DUMP_MODE "drv_dump_mode"
#define DRIVER_STRICT_ORDER "drv_strict_order"

/* DPAA2_SEC_DP_DUMP levels */
enum dpaa2_sec_dump_levels {
	DPAA2_SEC_DP_NO_DUMP,
	DPAA2_SEC_DP_ERR_DUMP,
	DPAA2_SEC_DP_FULL_DUMP
};

uint8_t cryptodev_driver_id;
uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;

static inline void
free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
	else
		rte_free((void *)(fle-1));
}
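
/*
 * Layout note (a summary of the convention used by every FD builder below,
 * not a hardware requirement): each job is described by a compound frame
 * whose FLE table is laid out as
 *
 *	fle[0]	bookkeeping: rte_crypto_op pointer + session ctxt
 *	fle[1]	op_fle, the output frame list entry (FD address points here)
 *	fle[2]	ip_fle, the input frame list entry
 *	fle[3+]	scatter/gather entries, when SG is used
 *
 * SEC never dereferences fle[0]; completion handlers recover the op by
 * stepping back one entry from the FD address, i.e. (fle - 1), exactly as
 * free_fle() above and sec_fd_to_mbuf() below do.
 */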

static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
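
/*
 * Illustrative application-side sketch (not driver code) for the HFN
 * override handled above: when a PDCP session is created with HFN override
 * enabled, the per-packet HFN is expected sess->pdcp.hfn_ovd_offset bytes
 * past the op, so the caller would do something like
 *
 *	uint32_t *hfn = (uint32_t *)((uint8_t *)op + hfn_ovd_offset);
 *	*hfn = per_packet_hfn;	// read back by the PDCP builders here
 *
 * where per_packet_hfn and hfn_ovd_offset are application-chosen values.
 */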

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, uint16_t bpid,
			struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf));
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf));
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid, qp);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
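
/*
 * Note on build_proto_fd() above: a simple (non-compound) FD has no spare
 * field for the op pointer, so the op is parked in mbuf->buf_iova and the
 * real IOVA is saved in aead.digest.phys_addr. sec_simple_fd_to_mbuf()
 * performs the inverse swap on completion; the two functions must stay in
 * sync.
 */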

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length);
	DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x",
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
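
/*
 * Worked length example for the GCM frame lists built above (illustrative
 * numbers): with aead.data.length = 64, iv.length = 12, auth_only_len
 * (AAD) = 16 and digest_length = 16, encryption uses
 * ip_fle->length = 64 + 12 + 16 = 92 (IV + AAD + plaintext) and
 * op_fle->length = 64 + 16 = 80 (ciphertext + ICV); decryption instead adds
 * the ICV to the input, ip_fle->length = 92 + 16 = 108, with
 * op_fle->length = 64.
 */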

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid,
		     struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the Mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we go back 1 FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length);
	DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x",
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}
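
/*
 * Allocation summary (restating what the builders above and below already
 * do): contiguous single-segment jobs take a fixed FLE_POOL_BUF_SIZE buffer
 * from qp->fle_pool, while scatter/gather jobs rte_malloc() a table sized
 * by FLE_SG_MEM_SIZE(). The release paths, free_fle() and sec_fd_to_mbuf(),
 * pick the matching free call by re-testing rte_pktmbuf_is_contiguous()
 * on m_src.
 */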

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length);
	DPAA2_SEC_DP_DEBUG(
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
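
/*
 * Worked example for the auth_only_len packing used by build_authenc_sg_fd()
 * above and build_authenc_fd() below (illustrative numbers): with
 * auth.data = {offset 0, length 100} and cipher.data = {offset 20,
 * length 72}, auth_hdr_len = 20 - 0 = 20 and
 * auth_tail_len = 100 - 72 - 20 = 8, giving
 * auth_only_len = (8 << 16) | 20 = 0x80014: authenticated-only tail bytes
 * in the high half-word, header bytes in the low half-word.
 */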

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store the Mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back 1 FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length);
	DPAA2_SEC_DP_DEBUG(
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->digest_length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
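
/*
 * Note on the SNOW 3G UIA2 / ZUC EIA3 paths above and below: the cryptodev
 * API expresses auth.data offset/length in bits for these algorithms, so
 * e.g. length = 128 bits converts to 16 bytes after the >> 3, while a
 * request of 129 bits fails the (len & 7) check with -ENOTSUP. The IV is
 * likewise repacked: conv_to_snow_f9_iv() yields a 12-byte F9 IV and
 * conv_to_zuc_eia_iv() an 8-byte one.
 */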

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the Mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back 1 FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
	char debug_str[1024];
	int offset;
#endif

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
	offset = sprintf(debug_str,
			 "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ",
			 DPAA2_GET_FD_ADDR(fd),
			 bpid < MAX_BPID ? "SW" : "BMAN");
	if (bpid < MAX_BPID) {
		offset += sprintf(&debug_str[offset],
				  "bpid = %d ", bpid);
	}
	/* mbuf is NULL after the seg walk above; use the source mbuf */
	offset += sprintf(&debug_str[offset],
			  "private size = %d ",
			  sym_op->m_src->pool->private_data_size);
	offset += sprintf(&debug_str[offset],
			  "off =%d, len =%d",
			  DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
	DPAA2_SEC_DP_DEBUG("%s", debug_str);
#else
	RTE_SET_USED(bpid);
#endif

	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;
#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
	char debug_str[1024];
	int offset;
#endif

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the Mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we go back 1 FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
	offset = sprintf(debug_str,
			 "CIPHER: fdaddr =%" PRIx64 ", from %s pool ",
			 DPAA2_GET_FD_ADDR(fd),
			 bpid < MAX_BPID ? "SW" : "BMAN");
	if (bpid < MAX_BPID) {
		offset += sprintf(&debug_str[offset],
				  "bpid = %d ", bpid);
	}
	offset += sprintf(&debug_str[offset],
			  "private size = %d ",
			  dst->pool->private_data_size);
	offset += sprintf(&debug_str[offset],
			  "off =%d, len =%d",
			  DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
	DPAA2_SEC_DP_DEBUG("%s", debug_str);
#endif

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
	} else {
		DPAA2_SEC_DP_ERR("Session type invalid");
		return -ENOTSUP;
	}

	if (!sess) {
		DPAA2_SEC_DP_ERR("Session not available");
		return -EINVAL;
	}

	/* Either of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
		default:
			DPAA2_SEC_ERR("error: Unsupported session %d",
				      sess->ctxt_type);
			ret = -ENOTSUP;
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid, qp);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
			break;
		default:
			DPAA2_SEC_ERR("error: Unsupported session %d",
				      sess->ctxt_type);
			ret = -ENOTSUP;
		}
	}
	return ret;
}
"SW" : "BMAN"); 1224 if (bpid < MAX_BPID) { 1225 offset += sprintf(&debug_str[offset], 1226 "bpid = %d ", bpid); 1227 } 1228 offset += sprintf(&debug_str[offset], 1229 "private size = %d ", 1230 mbuf->pool->private_data_size); 1231 offset += sprintf(&debug_str[offset], 1232 "off =%d, len =%d", 1233 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); 1234 DPAA2_SEC_DP_DEBUG("%s", debug_str); 1235 #else 1236 RTE_SET_USED(bpid); 1237 #endif 1238 1239 return 0; 1240 } 1241 1242 static int 1243 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1244 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1245 { 1246 struct rte_crypto_sym_op *sym_op = op->sym; 1247 struct qbman_fle *fle, *sge; 1248 int retval, data_len, data_offset; 1249 struct sec_flow_context *flc; 1250 struct ctxt_priv *priv = sess->ctxt; 1251 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1252 sess->iv.offset); 1253 struct rte_mbuf *dst; 1254 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1255 char debug_str[1024]; 1256 int offset; 1257 #endif 1258 1259 data_len = sym_op->cipher.data.length; 1260 data_offset = sym_op->cipher.data.offset; 1261 1262 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1263 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1264 if ((data_len & 7) || (data_offset & 7)) { 1265 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1266 return -ENOTSUP; 1267 } 1268 1269 data_len = data_len >> 3; 1270 data_offset = data_offset >> 3; 1271 } 1272 1273 if (sym_op->m_dst) 1274 dst = sym_op->m_dst; 1275 else 1276 dst = sym_op->m_src; 1277 1278 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 1279 if (retval) { 1280 DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool"); 1281 return -ENOMEM; 1282 } 1283 memset(fle, 0, FLE_POOL_BUF_SIZE); 1284 /* TODO we are using the first FLE entry to store Mbuf. 1285 * Currently we donot know which FLE has the mbuf stored. 1286 * So while retreiving we can go back 1 FLE from the FD -ADDR 1287 * to get the MBUF Addr from the previous FLE. 

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = SECURITY_GET_SESS_PRIV(op->sym->session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	if (unlikely(fd->simple.frc)) {
		DPAA2_SEC_ERR("SEC returned Error - %x",
			      fd->simple.frc);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	} else {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	}

	return op;
}
"SW" : "BMAN"); 1354 if (bpid < MAX_BPID) { 1355 offset += sprintf(&debug_str[offset], 1356 "bpid = %d ", bpid); 1357 } 1358 offset += sprintf(&debug_str[offset], 1359 "private size = %d ", 1360 dst->pool->private_data_size); 1361 offset += sprintf(&debug_str[offset], 1362 "off =%d, len =%d", 1363 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); 1364 DPAA2_SEC_DP_DEBUG("%s", debug_str); 1365 #endif 1366 1367 return 0; 1368 } 1369 1370 static inline int 1371 build_sec_fd(struct rte_crypto_op *op, 1372 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1373 { 1374 int ret = -1; 1375 dpaa2_sec_session *sess; 1376 1377 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { 1378 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1379 } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1380 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1381 } else { 1382 DPAA2_SEC_DP_ERR("Session type invalid"); 1383 return -ENOTSUP; 1384 } 1385 1386 if (!sess) { 1387 DPAA2_SEC_DP_ERR("Session not available"); 1388 return -EINVAL; 1389 } 1390 1391 /* Any of the buffer is segmented*/ 1392 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1393 ((op->sym->m_dst != NULL) && 1394 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1395 switch (sess->ctxt_type) { 1396 case DPAA2_SEC_CIPHER: 1397 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1398 break; 1399 case DPAA2_SEC_AUTH: 1400 ret = build_auth_sg_fd(sess, op, fd, bpid); 1401 break; 1402 case DPAA2_SEC_AEAD: 1403 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1404 break; 1405 case DPAA2_SEC_CIPHER_HASH: 1406 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1407 break; 1408 case DPAA2_SEC_IPSEC: 1409 case DPAA2_SEC_PDCP: 1410 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1411 break; 1412 default: 1413 DPAA2_SEC_ERR("error: Unsupported session %d", 1414 sess->ctxt_type); 1415 ret = -ENOTSUP; 1416 } 1417 } else { 1418 switch (sess->ctxt_type) { 1419 case DPAA2_SEC_CIPHER: 1420 ret = build_cipher_fd(sess, op, fd, bpid, qp); 1421 break; 1422 case DPAA2_SEC_AUTH: 1423 ret = build_auth_fd(sess, op, fd, bpid, qp); 1424 break; 1425 case DPAA2_SEC_AEAD: 1426 ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp); 1427 break; 1428 case DPAA2_SEC_CIPHER_HASH: 1429 ret = build_authenc_fd(sess, op, fd, bpid, qp); 1430 break; 1431 case DPAA2_SEC_IPSEC: 1432 ret = build_proto_fd(sess, op, fd, bpid, qp); 1433 break; 1434 case DPAA2_SEC_PDCP: 1435 ret = build_proto_compound_fd(sess, op, fd, bpid, qp); 1436 break; 1437 default: 1438 DPAA2_SEC_ERR("error: Unsupported session%d", 1439 sess->ctxt_type); 1440 ret = -ENOTSUP; 1441 } 1442 } 1443 return ret; 1444 } 1445 1446 static uint16_t 1447 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1448 uint16_t nb_ops) 1449 { 1450 /* Function to transmit the frames to given device and VQ*/ 1451 uint32_t loop; 1452 int32_t ret; 1453 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1454 uint32_t frames_to_send, retry_count; 1455 struct qbman_eq_desc eqdesc; 1456 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1457 struct qbman_swp *swp; 1458 uint16_t num_tx = 0; 1459 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1460 /*todo - need to support multiple buffer pools */ 1461 uint16_t bpid; 1462 struct rte_mempool *mb_pool; 1463 1464 if (unlikely(nb_ops == 0)) 1465 return 0; 1466 1467 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1468 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1469 return 0; 1470 } 1471 /*Prepare enqueue descriptor*/ 1472 qbman_eq_desc_clear(&eqdesc); 1473 

static void
dpaa2_sec_dump(struct rte_crypto_op *op, FILE *f)
{
	int i;
	dpaa2_sec_session *sess = NULL;
	struct ctxt_priv *priv;
	uint8_t bufsize;
	struct rte_crypto_sym_op *sym_op;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
#endif

	if (sess == NULL)
		goto mbuf_dump;

	priv = (struct ctxt_priv *)sess->ctxt;
	fprintf(f, "\n****************************************\n"
		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
		"\tCipher key len:\t%zd\n", sess->ctxt_type,
		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
		sess->cipher_key.length);
	rte_hexdump(f, "cipher key", sess->cipher_key.data,
		    sess->cipher_key.length);
	rte_hexdump(f, "auth key", sess->auth_key.data,
		    sess->auth_key.length);
	fprintf(f, "\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
		" len:\t%d\n\taead cipher text:\t%d\n",
		sess->auth_key.length, sess->iv.length, sess->iv.offset,
		sess->digest_length, sess->status,
		sess->ext_params.aead_ctxt.auth_only_len,
		sess->ext_params.aead_ctxt.auth_cipher_text);
#ifdef RTE_LIB_SECURITY
	fprintf(f, "PDCP session params:\n"
		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
		sess->pdcp.hfn_threshold);
#endif
	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
	fprintf(f, "Descriptor Dump:\n");
	for (i = 0; i < bufsize; i++)
		fprintf(f, "\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);

	fprintf(f, "\n");
mbuf_dump:
	sym_op = op->sym;
	if (sym_op->m_src) {
		fprintf(f, "Source mbuf:\n");
		rte_pktmbuf_dump(f, sym_op->m_src, sym_op->m_src->data_len);
	}
	if (sym_op->m_dst) {
		fprintf(f, "Destination mbuf:\n");
		rte_pktmbuf_dump(f, sym_op->m_dst, sym_op->m_dst->data_len);
	}

	fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n"
		"auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
		sym_op->session,
		sym_op->cipher.data.offset, sym_op->cipher.data.length,
		sym_op->auth.data.offset, sym_op->auth.data.length,
		sym_op->aead.data.offset, sym_op->aead.data.length);
	fprintf(f, "\n");
}

static void
dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
			  struct dpaa2_queue *dpaa2_q)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct rte_crypto_op *op;
	struct qbman_fd *fd;
	struct dpaa2_sec_qp *dpaa2_qp;

	dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	op = sec_fd_to_mbuf(fd, dpaa2_qp);
	/* An alternative would be to enqueue the frame back to the sec tx
	 * queue (sec->core) after setting an error in the FD, instead of
	 * freeing it here, but that would have a performance impact.
	 */
	rte_pktmbuf_free(op->sym->m_src);
}

static void
dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
				 struct rte_mbuf *m,
				 struct qbman_eq_desc *eqdesc)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
					dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
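
/*
 * Ordering summary for the strict-order (ORP) path above: when loose
 * ordering is disabled, each enqueue carries an order-restoration point id
 * and sequence number decoded from the mbuf's dpaa2_seqn, plus an enqueue
 * response entry from the per-portal eqresp[] ring (eqresp_pi wraps at
 * MAX_EQ_RESP_ENTRIES). dpaa2_sec_free_eqresp_buf() consumes those
 * responses and frees the associated frames.
 */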
"DIR_ENC" : "DIR_DEC", 1690 sess->cipher_alg, sess->auth_alg, sess->aead_alg, 1691 sess->cipher_key.length); 1692 rte_hexdump(f, "cipher key", sess->cipher_key.data, 1693 sess->cipher_key.length); 1694 rte_hexdump(f, "auth key", sess->auth_key.data, 1695 sess->auth_key.length); 1696 fprintf(f, "\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n" 1697 "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only" 1698 " len:\t%d\n\taead cipher text:\t%d\n", 1699 sess->auth_key.length, sess->iv.length, sess->iv.offset, 1700 sess->digest_length, sess->status, 1701 sess->ext_params.aead_ctxt.auth_only_len, 1702 sess->ext_params.aead_ctxt.auth_cipher_text); 1703 #ifdef RTE_LIB_SECURITY 1704 fprintf(f, "PDCP session params:\n" 1705 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" 1706 "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" 1707 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain, 1708 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd, 1709 sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn, 1710 sess->pdcp.hfn_threshold); 1711 1712 #endif 1713 bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl; 1714 fprintf(f, "Descriptor Dump:\n"); 1715 for (i = 0; i < bufsize; i++) 1716 fprintf(f, "\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]); 1717 1718 fprintf(f, "\n"); 1719 mbuf_dump: 1720 sym_op = op->sym; 1721 if (sym_op->m_src) { 1722 fprintf(f, "Source mbuf:\n"); 1723 rte_pktmbuf_dump(f, sym_op->m_src, sym_op->m_src->data_len); 1724 } 1725 if (sym_op->m_dst) { 1726 fprintf(f, "Destination mbuf:\n"); 1727 rte_pktmbuf_dump(f, sym_op->m_dst, sym_op->m_dst->data_len); 1728 } 1729 1730 fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n" 1731 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n" 1732 , sym_op->session, 1733 sym_op->cipher.data.offset, sym_op->cipher.data.length, 1734 sym_op->auth.data.offset, sym_op->auth.data.length, 1735 sym_op->aead.data.offset, sym_op->aead.data.length); 1736 fprintf(f, "\n"); 1737 1738 } 1739 1740 static void 1741 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci, 1742 struct dpaa2_queue *dpaa2_q) 1743 { 1744 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 1745 struct rte_crypto_op *op; 1746 struct qbman_fd *fd; 1747 struct dpaa2_sec_qp *dpaa2_qp; 1748 1749 dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq); 1750 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); 1751 op = sec_fd_to_mbuf(fd, dpaa2_qp); 1752 /* Instead of freeing, enqueue it to the sec tx queue (sec->core) 1753 * after setting an error in FD. But this will have performance impact. 

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also note the SWP is shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
				DPAA2_SEC_DP_ERR("SEC returned Error - %x",
						 fd->simple.frc);
				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
					dpaa2_sec_dump(ops[num_rx], stdout);
			}

			dpaa2_qp->rx_vq.err_pkts += 1;
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx,
			   dpaa2_qp->rx_vq.err_pkts);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
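
/*
 * The error path above is governed by dpaa2_sec_dp_dump, defined near the
 * top of this file: DPAA2_SEC_DP_NO_DUMP silences it, DPAA2_SEC_DP_ERR_DUMP
 * (the default) logs the FRC code only, and DPAA2_SEC_DP_FULL_DUMP also
 * calls dpaa2_sec_dump() on the failed op. The level is expected to come
 * from the "drv_dump_mode" (DRIVER_DUMP_MODE) devarg, parsed elsewhere in
 * this driver.
 */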

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_mempool_free(qp->fle_pool);
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
1964 */ 1965 while (!qbman_check_command_complete(dq_storage)) 1966 ; 1967 1968 /* Loop until the dq_storage is updated with 1969 * new token by QBMAN 1970 */ 1971 while (!qbman_check_new_result(dq_storage)) 1972 ; 1973 /* Check whether Last Pull command is Expired and 1974 * setting Condition for Loop termination 1975 */ 1976 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1977 is_last = 1; 1978 /* Check for valid frame. */ 1979 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1980 if (unlikely( 1981 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1982 DPAA2_SEC_DP_DEBUG("No frame is delivered"); 1983 continue; 1984 } 1985 } 1986 1987 fd = qbman_result_DQ_fd(dq_storage); 1988 ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp); 1989 1990 if (unlikely(fd->simple.frc)) { 1991 /* TODO Parse SEC errors */ 1992 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) { 1993 DPAA2_SEC_DP_ERR("SEC returned Error - %x", 1994 fd->simple.frc); 1995 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP) 1996 dpaa2_sec_dump(ops[num_rx], stdout); 1997 } 1998 1999 dpaa2_qp->rx_vq.err_pkts += 1; 2000 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 2001 } else { 2002 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 2003 } 2004 2005 num_rx++; 2006 dq_storage++; 2007 } /* End of Packet Rx loop */ 2008 2009 dpaa2_qp->rx_vq.rx_pkts += num_rx; 2010 2011 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx, 2012 dpaa2_qp->rx_vq.err_pkts); 2013 /*Return the total number of packets received to DPAA2 app*/ 2014 return num_rx; 2015 } 2016 2017 /** Release queue pair */ 2018 static int 2019 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 2020 { 2021 struct dpaa2_sec_qp *qp = 2022 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 2023 2024 PMD_INIT_FUNC_TRACE(); 2025 2026 if (qp->rx_vq.q_storage) { 2027 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 2028 rte_free(qp->rx_vq.q_storage); 2029 } 2030 rte_mempool_free(qp->fle_pool); 2031 rte_free(qp); 2032 2033 dev->data->queue_pairs[queue_pair_id] = NULL; 2034 2035 return 0; 2036 } 2037 2038 /** Setup a queue pair */ 2039 static int 2040 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 2041 const struct rte_cryptodev_qp_conf *qp_conf, 2042 __rte_unused int socket_id) 2043 { 2044 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2045 struct dpaa2_sec_qp *qp; 2046 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2047 struct dpseci_rx_queue_cfg cfg; 2048 int32_t retcode; 2049 char str[RTE_MEMZONE_NAMESIZE]; 2050 2051 PMD_INIT_FUNC_TRACE(); 2052 2053 /* If qp is already in use free ring memory and qp metadata. 
	 */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	if (qp_conf->nb_descriptors < (2 * FLE_POOL_CACHE_SIZE)) {
		DPAA2_SEC_ERR("Minimum supported nb_descriptors %d,"
			      " but given %d", (2 * FLE_POOL_CACHE_SIZE),
			      qp_conf->nb_descriptors);
		return -EINVAL;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -ENOMEM;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
		return -ENOMEM;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
		return -ENOMEM;
	}

	dev->data->queue_pairs[qp_id] = qp;

	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
			getpid(), dev->data->dev_id, qp_id);
	qp->fle_pool = rte_mempool_create((const char *)str,
			qp_conf->nb_descriptors,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
	if (!qp->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		dev->data->queue_pairs[qp_id] = NULL;
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
		return -ENOMEM;
	}

	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Returns the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct alginfo cipherdata;
	int bufsize, ret = 0;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required.
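	 * The flow context embeds that single shared descriptor, so the
	 * allocation that follows is sized as one struct ctxt_priv plus
	 * one struct sec_flc_desc.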
*/ 2136 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2137 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2138 RTE_CACHE_LINE_SIZE); 2139 if (priv == NULL) { 2140 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2141 return -ENOMEM; 2142 } 2143 2144 flc = &priv->flc_desc[0].flc; 2145 2146 session->ctxt_type = DPAA2_SEC_CIPHER; 2147 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2148 RTE_CACHE_LINE_SIZE); 2149 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2150 DPAA2_SEC_ERR("No Memory for cipher key"); 2151 rte_free(priv); 2152 return -ENOMEM; 2153 } 2154 session->cipher_key.length = xform->cipher.key.length; 2155 2156 memcpy(session->cipher_key.data, xform->cipher.key.data, 2157 xform->cipher.key.length); 2158 cipherdata.key = (size_t)session->cipher_key.data; 2159 cipherdata.keylen = session->cipher_key.length; 2160 cipherdata.key_enc_flags = 0; 2161 cipherdata.key_type = RTA_DATA_IMM; 2162 2163 /* Set IV parameters */ 2164 session->iv.offset = xform->cipher.iv.offset; 2165 session->iv.length = xform->cipher.iv.length; 2166 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2167 DIR_ENC : DIR_DEC; 2168 2169 switch (xform->cipher.algo) { 2170 case RTE_CRYPTO_CIPHER_AES_CBC: 2171 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2172 cipherdata.algmode = OP_ALG_AAI_CBC; 2173 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2174 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2175 SHR_NEVER, &cipherdata, 2176 session->iv.length, 2177 session->dir); 2178 break; 2179 case RTE_CRYPTO_CIPHER_3DES_CBC: 2180 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2181 cipherdata.algmode = OP_ALG_AAI_CBC; 2182 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2183 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2184 SHR_NEVER, &cipherdata, 2185 session->iv.length, 2186 session->dir); 2187 break; 2188 case RTE_CRYPTO_CIPHER_DES_CBC: 2189 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2190 cipherdata.algmode = OP_ALG_AAI_CBC; 2191 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2192 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2193 SHR_NEVER, &cipherdata, 2194 session->iv.length, 2195 session->dir); 2196 break; 2197 case RTE_CRYPTO_CIPHER_AES_CTR: 2198 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2199 cipherdata.algmode = OP_ALG_AAI_CTR; 2200 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2201 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2202 SHR_NEVER, &cipherdata, 2203 session->iv.length, 2204 session->dir); 2205 break; 2206 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2207 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 2208 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 2209 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 2210 &cipherdata, 2211 session->dir); 2212 break; 2213 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2214 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 2215 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 2216 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 2217 &cipherdata, 2218 session->dir); 2219 break; 2220 default: 2221 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 2222 rte_cryptodev_get_cipher_algo_string(xform->cipher.algo), 2223 xform->cipher.algo); 2224 ret = -ENOTSUP; 2225 goto error_out; 2226 } 2227 2228 if (bufsize < 0) { 2229 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 2230 ret = -EINVAL; 2231 goto error_out; 2232 } 2233 2234 flc->word1_sdl = (uint8_t)bufsize; 2235 session->ctxt = priv; 2236 2237 #ifdef CAAM_DESC_DEBUG 2238 int i; 2239 for (i = 0; i < 
bufsize; i++) 2240 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 2241 #endif 2242 return ret; 2243 2244 error_out: 2245 rte_free(session->cipher_key.data); 2246 rte_free(priv); 2247 return ret; 2248 } 2249 2250 static int 2251 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform, 2252 dpaa2_sec_session *session) 2253 { 2254 struct alginfo authdata; 2255 int bufsize, ret = 0; 2256 struct ctxt_priv *priv; 2257 struct sec_flow_context *flc; 2258 2259 PMD_INIT_FUNC_TRACE(); 2260 2261 memset(&authdata, 0, sizeof(authdata)); 2262 2263 /* For SEC AUTH three descriptors are required for various stages */ 2264 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2265 sizeof(struct ctxt_priv) + 3 * 2266 sizeof(struct sec_flc_desc), 2267 RTE_CACHE_LINE_SIZE); 2268 if (priv == NULL) { 2269 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2270 return -ENOMEM; 2271 } 2272 2273 flc = &priv->flc_desc[DESC_INITFINAL].flc; 2274 2275 session->ctxt_type = DPAA2_SEC_AUTH; 2276 session->auth_key.length = xform->auth.key.length; 2277 if (xform->auth.key.length) { 2278 session->auth_key.data = rte_zmalloc(NULL, 2279 xform->auth.key.length, 2280 RTE_CACHE_LINE_SIZE); 2281 if (session->auth_key.data == NULL) { 2282 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 2283 rte_free(priv); 2284 return -ENOMEM; 2285 } 2286 memcpy(session->auth_key.data, xform->auth.key.data, 2287 xform->auth.key.length); 2288 authdata.key = (size_t)session->auth_key.data; 2289 authdata.key_enc_flags = 0; 2290 authdata.key_type = RTA_DATA_IMM; 2291 } 2292 authdata.keylen = session->auth_key.length; 2293 2294 session->digest_length = xform->auth.digest_length; 2295 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2296 DIR_ENC : DIR_DEC; 2297 2298 switch (xform->auth.algo) { 2299 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2300 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2301 authdata.algmode = OP_ALG_AAI_HMAC; 2302 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2303 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2304 1, 0, SHR_NEVER, &authdata, 2305 !session->dir, 2306 session->digest_length); 2307 break; 2308 case RTE_CRYPTO_AUTH_MD5_HMAC: 2309 authdata.algtype = OP_ALG_ALGSEL_MD5; 2310 authdata.algmode = OP_ALG_AAI_HMAC; 2311 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2312 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2313 1, 0, SHR_NEVER, &authdata, 2314 !session->dir, 2315 session->digest_length); 2316 break; 2317 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2318 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2319 authdata.algmode = OP_ALG_AAI_HMAC; 2320 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2321 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2322 1, 0, SHR_NEVER, &authdata, 2323 !session->dir, 2324 session->digest_length); 2325 break; 2326 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2327 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2328 authdata.algmode = OP_ALG_AAI_HMAC; 2329 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2330 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2331 1, 0, SHR_NEVER, &authdata, 2332 !session->dir, 2333 session->digest_length); 2334 break; 2335 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2336 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2337 authdata.algmode = OP_ALG_AAI_HMAC; 2338 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2339 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2340 1, 0, SHR_NEVER, &authdata, 2341 !session->dir, 2342 session->digest_length); 2343 break; 2344 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2345 authdata.algtype = 
OP_ALG_ALGSEL_SHA224; 2346 authdata.algmode = OP_ALG_AAI_HMAC; 2347 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2348 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2349 1, 0, SHR_NEVER, &authdata, 2350 !session->dir, 2351 session->digest_length); 2352 break; 2353 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2354 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2355 authdata.algmode = OP_ALG_AAI_F9; 2356 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2357 session->iv.offset = xform->auth.iv.offset; 2358 session->iv.length = xform->auth.iv.length; 2359 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2360 1, 0, &authdata, 2361 !session->dir, 2362 session->digest_length); 2363 break; 2364 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2365 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2366 authdata.algmode = OP_ALG_AAI_F9; 2367 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2368 session->iv.offset = xform->auth.iv.offset; 2369 session->iv.length = xform->auth.iv.length; 2370 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2371 1, 0, &authdata, 2372 !session->dir, 2373 session->digest_length); 2374 break; 2375 case RTE_CRYPTO_AUTH_SHA1: 2376 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2377 authdata.algmode = OP_ALG_AAI_HASH; 2378 session->auth_alg = RTE_CRYPTO_AUTH_SHA1; 2379 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2380 1, 0, SHR_NEVER, &authdata, 2381 !session->dir, 2382 session->digest_length); 2383 break; 2384 case RTE_CRYPTO_AUTH_MD5: 2385 authdata.algtype = OP_ALG_ALGSEL_MD5; 2386 authdata.algmode = OP_ALG_AAI_HASH; 2387 session->auth_alg = RTE_CRYPTO_AUTH_MD5; 2388 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2389 1, 0, SHR_NEVER, &authdata, 2390 !session->dir, 2391 session->digest_length); 2392 break; 2393 case RTE_CRYPTO_AUTH_SHA256: 2394 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2395 authdata.algmode = OP_ALG_AAI_HASH; 2396 session->auth_alg = RTE_CRYPTO_AUTH_SHA256; 2397 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2398 1, 0, SHR_NEVER, &authdata, 2399 !session->dir, 2400 session->digest_length); 2401 break; 2402 case RTE_CRYPTO_AUTH_SHA384: 2403 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2404 authdata.algmode = OP_ALG_AAI_HASH; 2405 session->auth_alg = RTE_CRYPTO_AUTH_SHA384; 2406 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2407 1, 0, SHR_NEVER, &authdata, 2408 !session->dir, 2409 session->digest_length); 2410 break; 2411 case RTE_CRYPTO_AUTH_SHA512: 2412 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2413 authdata.algmode = OP_ALG_AAI_HASH; 2414 session->auth_alg = RTE_CRYPTO_AUTH_SHA512; 2415 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2416 1, 0, SHR_NEVER, &authdata, 2417 !session->dir, 2418 session->digest_length); 2419 break; 2420 case RTE_CRYPTO_AUTH_SHA224: 2421 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2422 authdata.algmode = OP_ALG_AAI_HASH; 2423 session->auth_alg = RTE_CRYPTO_AUTH_SHA224; 2424 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2425 1, 0, SHR_NEVER, &authdata, 2426 !session->dir, 2427 session->digest_length); 2428 break; 2429 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2430 authdata.algtype = OP_ALG_ALGSEL_AES; 2431 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2432 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2433 bufsize = cnstr_shdsc_aes_mac( 2434 priv->flc_desc[DESC_INITFINAL].desc, 2435 1, 0, SHR_NEVER, &authdata, 2436 !session->dir, 2437 session->digest_length); 2438 break; 2439 case RTE_CRYPTO_AUTH_AES_CMAC: 2440 authdata.algtype = 
OP_ALG_ALGSEL_AES; 2441 authdata.algmode = OP_ALG_AAI_CMAC; 2442 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2443 bufsize = cnstr_shdsc_aes_mac( 2444 priv->flc_desc[DESC_INITFINAL].desc, 2445 1, 0, SHR_NEVER, &authdata, 2446 !session->dir, 2447 session->digest_length); 2448 break; 2449 default: 2450 DPAA2_SEC_ERR("Crypto: Unsupported Auth alg %s (%u)", 2451 rte_cryptodev_get_auth_algo_string(xform->auth.algo), 2452 xform->auth.algo); 2453 ret = -ENOTSUP; 2454 goto error_out; 2455 } 2456 2457 if (bufsize < 0) { 2458 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2459 ret = -EINVAL; 2460 goto error_out; 2461 } 2462 2463 flc->word1_sdl = (uint8_t)bufsize; 2464 session->ctxt = priv; 2465 #ifdef CAAM_DESC_DEBUG 2466 int i; 2467 for (i = 0; i < bufsize; i++) 2468 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2469 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2470 #endif 2471 2472 return ret; 2473 2474 error_out: 2475 rte_free(session->auth_key.data); 2476 rte_free(priv); 2477 return ret; 2478 } 2479 2480 static int 2481 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform, 2482 dpaa2_sec_session *session) 2483 { 2484 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2485 struct alginfo aeaddata; 2486 int bufsize; 2487 struct ctxt_priv *priv; 2488 struct sec_flow_context *flc; 2489 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2490 int err, ret = 0; 2491 2492 PMD_INIT_FUNC_TRACE(); 2493 2494 /* Set IV parameters */ 2495 session->iv.offset = aead_xform->iv.offset; 2496 session->iv.length = aead_xform->iv.length; 2497 session->ctxt_type = DPAA2_SEC_AEAD; 2498 2499 /* For SEC AEAD only one descriptor is required */ 2500 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2501 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2502 RTE_CACHE_LINE_SIZE); 2503 if (priv == NULL) { 2504 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2505 return -ENOMEM; 2506 } 2507 2508 flc = &priv->flc_desc[0].flc; 2509 2510 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2511 RTE_CACHE_LINE_SIZE); 2512 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2513 DPAA2_SEC_ERR("No Memory for aead key"); 2514 rte_free(priv); 2515 return -ENOMEM; 2516 } 2517 memcpy(session->aead_key.data, aead_xform->key.data, 2518 aead_xform->key.length); 2519 2520 session->digest_length = aead_xform->digest_length; 2521 session->aead_key.length = aead_xform->key.length; 2522 ctxt->auth_only_len = aead_xform->aad_length; 2523 2524 aeaddata.key = (size_t)session->aead_key.data; 2525 aeaddata.keylen = session->aead_key.length; 2526 aeaddata.key_enc_flags = 0; 2527 aeaddata.key_type = RTA_DATA_IMM; 2528 2529 switch (aead_xform->algo) { 2530 case RTE_CRYPTO_AEAD_AES_GCM: 2531 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2532 aeaddata.algmode = OP_ALG_AAI_GCM; 2533 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2534 break; 2535 default: 2536 2537 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %s (%u)", 2538 rte_cryptodev_get_aead_algo_string(aead_xform->algo), 2539 aead_xform->algo); 2540 ret = -ENOTSUP; 2541 goto error_out; 2542 } 2543 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
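	/* Only AES-GCM maps to a raw AEAD descriptor here; rta_inline_query()
	 * below then decides whether the key fits inline in the shared
	 * descriptor or must be referenced by an IOVA pointer.
	 */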
2544 DIR_ENC : DIR_DEC; 2545 2546 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2547 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2548 DESC_JOB_IO_LEN, 2549 (unsigned int *)priv->flc_desc[0].desc, 2550 &priv->flc_desc[0].desc[1], 1); 2551 2552 if (err < 0) { 2553 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2554 ret = -EINVAL; 2555 goto error_out; 2556 } 2557 if (priv->flc_desc[0].desc[1] & 1) { 2558 aeaddata.key_type = RTA_DATA_IMM; 2559 } else { 2560 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2561 aeaddata.key_type = RTA_DATA_PTR; 2562 } 2563 priv->flc_desc[0].desc[0] = 0; 2564 priv->flc_desc[0].desc[1] = 0; 2565 2566 if (session->dir == DIR_ENC) 2567 bufsize = cnstr_shdsc_gcm_encap( 2568 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2569 &aeaddata, session->iv.length, 2570 session->digest_length); 2571 else 2572 bufsize = cnstr_shdsc_gcm_decap( 2573 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2574 &aeaddata, session->iv.length, 2575 session->digest_length); 2576 if (bufsize < 0) { 2577 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2578 ret = -EINVAL; 2579 goto error_out; 2580 } 2581 2582 flc->word1_sdl = (uint8_t)bufsize; 2583 session->ctxt = priv; 2584 #ifdef CAAM_DESC_DEBUG 2585 int i; 2586 for (i = 0; i < bufsize; i++) 2587 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2588 i, priv->flc_desc[0].desc[i]); 2589 #endif 2590 return ret; 2591 2592 error_out: 2593 rte_free(session->aead_key.data); 2594 rte_free(priv); 2595 return ret; 2596 } 2597 2598 2599 static int 2600 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform, 2601 dpaa2_sec_session *session) 2602 { 2603 struct alginfo authdata, cipherdata; 2604 int bufsize; 2605 struct ctxt_priv *priv; 2606 struct sec_flow_context *flc; 2607 struct rte_crypto_cipher_xform *cipher_xform; 2608 struct rte_crypto_auth_xform *auth_xform; 2609 int err, ret = 0; 2610 2611 PMD_INIT_FUNC_TRACE(); 2612 2613 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2614 cipher_xform = &xform->cipher; 2615 auth_xform = &xform->next->auth; 2616 session->ctxt_type = 2617 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2618 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2619 } else { 2620 cipher_xform = &xform->next->cipher; 2621 auth_xform = &xform->auth; 2622 session->ctxt_type = 2623 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2624 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2625 } 2626 2627 /* Set IV parameters */ 2628 session->iv.offset = cipher_xform->iv.offset; 2629 session->iv.length = cipher_xform->iv.length; 2630 2631 /* For SEC AEAD only one descriptor is required */ 2632 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2633 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2634 RTE_CACHE_LINE_SIZE); 2635 if (priv == NULL) { 2636 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2637 return -ENOMEM; 2638 } 2639 2640 flc = &priv->flc_desc[0].flc; 2641 2642 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2643 RTE_CACHE_LINE_SIZE); 2644 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2645 DPAA2_SEC_ERR("No Memory for cipher key"); 2646 rte_free(priv); 2647 return -ENOMEM; 2648 } 2649 session->cipher_key.length = cipher_xform->key.length; 2650 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2651 RTE_CACHE_LINE_SIZE); 2652 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2653 DPAA2_SEC_ERR("No Memory for auth key"); 2654 rte_free(session->cipher_key.data); 2655 rte_free(priv); 2656 return -ENOMEM; 2657 } 2658 session->auth_key.length = auth_xform->key.length; 2659 memcpy(session->cipher_key.data, cipher_xform->key.data, 2660 cipher_xform->key.length); 2661 memcpy(session->auth_key.data, auth_xform->key.data, 2662 auth_xform->key.length); 2663 2664 authdata.key = (size_t)session->auth_key.data; 2665 authdata.keylen = session->auth_key.length; 2666 authdata.key_enc_flags = 0; 2667 authdata.key_type = RTA_DATA_IMM; 2668 2669 session->digest_length = auth_xform->digest_length; 2670 2671 switch (auth_xform->algo) { 2672 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2673 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2674 authdata.algmode = OP_ALG_AAI_HMAC; 2675 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2676 break; 2677 case RTE_CRYPTO_AUTH_MD5_HMAC: 2678 authdata.algtype = OP_ALG_ALGSEL_MD5; 2679 authdata.algmode = OP_ALG_AAI_HMAC; 2680 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2681 break; 2682 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2683 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2684 authdata.algmode = OP_ALG_AAI_HMAC; 2685 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2686 break; 2687 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2688 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2689 authdata.algmode = OP_ALG_AAI_HMAC; 2690 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2691 break; 2692 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2693 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2694 authdata.algmode = OP_ALG_AAI_HMAC; 2695 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2696 break; 2697 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2698 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2699 authdata.algmode = OP_ALG_AAI_HMAC; 2700 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2701 break; 2702 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2703 authdata.algtype = OP_ALG_ALGSEL_AES; 2704 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2705 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2706 break; 2707 case RTE_CRYPTO_AUTH_AES_CMAC: 2708 authdata.algtype = OP_ALG_ALGSEL_AES; 2709 authdata.algmode = OP_ALG_AAI_CMAC; 2710 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2711 break; 2712 default: 2713 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %s (%u)", 2714 rte_cryptodev_get_auth_algo_string(auth_xform->algo), 2715 auth_xform->algo); 2716 ret = -ENOTSUP; 2717 goto error_out; 2718 } 2719 cipherdata.key = (size_t)session->cipher_key.data; 2720 cipherdata.keylen = session->cipher_key.length; 2721 
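	/* Both key lengths are staged into desc[0]/desc[1] further below
	 * for rta_inline_query(), which reports per key whether it can be
	 * inlined in the shared descriptor or must be passed by pointer.
	 */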
cipherdata.key_enc_flags = 0; 2722 cipherdata.key_type = RTA_DATA_IMM; 2723 2724 switch (cipher_xform->algo) { 2725 case RTE_CRYPTO_CIPHER_AES_CBC: 2726 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2727 cipherdata.algmode = OP_ALG_AAI_CBC; 2728 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2729 break; 2730 case RTE_CRYPTO_CIPHER_3DES_CBC: 2731 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2732 cipherdata.algmode = OP_ALG_AAI_CBC; 2733 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2734 break; 2735 case RTE_CRYPTO_CIPHER_DES_CBC: 2736 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2737 cipherdata.algmode = OP_ALG_AAI_CBC; 2738 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2739 break; 2740 case RTE_CRYPTO_CIPHER_AES_CTR: 2741 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2742 cipherdata.algmode = OP_ALG_AAI_CTR; 2743 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2744 break; 2745 default: 2746 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)", 2747 rte_cryptodev_get_cipher_algo_string(cipher_xform->algo), 2748 cipher_xform->algo); 2749 ret = -ENOTSUP; 2750 goto error_out; 2751 } 2752 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2753 DIR_ENC : DIR_DEC; 2754 2755 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2756 priv->flc_desc[0].desc[1] = authdata.keylen; 2757 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2758 DESC_JOB_IO_LEN, 2759 (unsigned int *)priv->flc_desc[0].desc, 2760 &priv->flc_desc[0].desc[2], 2); 2761 2762 if (err < 0) { 2763 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2764 ret = -EINVAL; 2765 goto error_out; 2766 } 2767 if (priv->flc_desc[0].desc[2] & 1) { 2768 cipherdata.key_type = RTA_DATA_IMM; 2769 } else { 2770 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2771 cipherdata.key_type = RTA_DATA_PTR; 2772 } 2773 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2774 authdata.key_type = RTA_DATA_IMM; 2775 } else { 2776 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2777 authdata.key_type = RTA_DATA_PTR; 2778 } 2779 priv->flc_desc[0].desc[0] = 0; 2780 priv->flc_desc[0].desc[1] = 0; 2781 priv->flc_desc[0].desc[2] = 0; 2782 2783 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2784 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2785 0, SHR_SERIAL, 2786 &cipherdata, &authdata, 2787 session->iv.length, 2788 session->digest_length, 2789 session->dir); 2790 if (bufsize < 0) { 2791 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2792 ret = -EINVAL; 2793 goto error_out; 2794 } 2795 } else { 2796 DPAA2_SEC_ERR("Hash before cipher not supported"); 2797 ret = -ENOTSUP; 2798 goto error_out; 2799 } 2800 2801 flc->word1_sdl = (uint8_t)bufsize; 2802 session->ctxt = priv; 2803 #ifdef CAAM_DESC_DEBUG 2804 int i; 2805 for (i = 0; i < bufsize; i++) 2806 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2807 i, priv->flc_desc[0].desc[i]); 2808 #endif 2809 2810 return ret; 2811 2812 error_out: 2813 rte_free(session->cipher_key.data); 2814 rte_free(session->auth_key.data); 2815 rte_free(priv); 2816 return ret; 2817 } 2818 2819 static int 2820 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess) 2821 { 2822 dpaa2_sec_session *session = sess; 2823 int ret; 2824 2825 PMD_INIT_FUNC_TRACE(); 2826 2827 if (unlikely(sess == NULL)) { 2828 DPAA2_SEC_ERR("Invalid session struct"); 2829 return -EINVAL; 2830 } 2831 2832 memset(session, 0, sizeof(dpaa2_sec_session)); 2833 /* Default IV length = 0 */ 2834 session->iv.length = 0; 2835 2836 /* Cipher Only */ 2837 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2838 ret 
= dpaa2_sec_cipher_init(xform, session); 2839 2840 /* Authentication Only */ 2841 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2842 xform->next == NULL) { 2843 ret = dpaa2_sec_auth_init(xform, session); 2844 2845 /* Cipher then Authenticate */ 2846 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2847 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2848 session->ext_params.aead_ctxt.auth_cipher_text = true; 2849 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2850 ret = dpaa2_sec_auth_init(xform, session); 2851 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2852 ret = dpaa2_sec_cipher_init(xform, session); 2853 else 2854 ret = dpaa2_sec_aead_chain_init(xform, session); 2855 /* Authenticate then Cipher */ 2856 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2857 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2858 session->ext_params.aead_ctxt.auth_cipher_text = false; 2859 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2860 ret = dpaa2_sec_cipher_init(xform, session); 2861 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2862 ret = dpaa2_sec_auth_init(xform, session); 2863 else 2864 ret = dpaa2_sec_aead_chain_init(xform, session); 2865 /* AEAD operation for AES-GCM kind of Algorithms */ 2866 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2867 xform->next == NULL) { 2868 ret = dpaa2_sec_aead_init(xform, session); 2869 2870 } else { 2871 DPAA2_SEC_ERR("Invalid crypto type"); 2872 return -EINVAL; 2873 } 2874 2875 return ret; 2876 } 2877 2878 static int 2879 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2880 dpaa2_sec_session *session, 2881 struct alginfo *aeaddata) 2882 { 2883 PMD_INIT_FUNC_TRACE(); 2884 2885 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2886 RTE_CACHE_LINE_SIZE); 2887 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2888 DPAA2_SEC_ERR("No Memory for aead key"); 2889 return -ENOMEM; 2890 } 2891 memcpy(session->aead_key.data, aead_xform->key.data, 2892 aead_xform->key.length); 2893 2894 session->digest_length = aead_xform->digest_length; 2895 session->aead_key.length = aead_xform->key.length; 2896 2897 aeaddata->key = (size_t)session->aead_key.data; 2898 aeaddata->keylen = session->aead_key.length; 2899 aeaddata->key_enc_flags = 0; 2900 aeaddata->key_type = RTA_DATA_IMM; 2901 2902 switch (aead_xform->algo) { 2903 case RTE_CRYPTO_AEAD_AES_GCM: 2904 switch (session->digest_length) { 2905 case 8: 2906 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2907 break; 2908 case 12: 2909 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2910 break; 2911 case 16: 2912 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2913 break; 2914 default: 2915 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2916 session->digest_length); 2917 return -EINVAL; 2918 } 2919 aeaddata->algmode = OP_ALG_AAI_GCM; 2920 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2921 break; 2922 case RTE_CRYPTO_AEAD_AES_CCM: 2923 switch (session->digest_length) { 2924 case 8: 2925 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2926 break; 2927 case 12: 2928 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2929 break; 2930 case 16: 2931 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2932 break; 2933 default: 2934 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2935 session->digest_length); 2936 return -EINVAL; 2937 } 2938 aeaddata->algmode = OP_ALG_AAI_CCM; 2939 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2940 break; 2941 default: 2942 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2943 aead_xform->algo); 2944 return -ENOTSUP; 
2945 } 2946 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2947 DIR_ENC : DIR_DEC; 2948 2949 return 0; 2950 } 2951 2952 static int 2953 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2954 struct rte_crypto_auth_xform *auth_xform, 2955 dpaa2_sec_session *session, 2956 struct alginfo *cipherdata, 2957 struct alginfo *authdata) 2958 { 2959 if (cipher_xform) { 2960 session->cipher_key.data = rte_zmalloc(NULL, 2961 cipher_xform->key.length, 2962 RTE_CACHE_LINE_SIZE); 2963 if (session->cipher_key.data == NULL && 2964 cipher_xform->key.length > 0) { 2965 DPAA2_SEC_ERR("No Memory for cipher key"); 2966 return -ENOMEM; 2967 } 2968 2969 session->cipher_key.length = cipher_xform->key.length; 2970 memcpy(session->cipher_key.data, cipher_xform->key.data, 2971 cipher_xform->key.length); 2972 session->cipher_alg = cipher_xform->algo; 2973 } else { 2974 session->cipher_key.data = NULL; 2975 session->cipher_key.length = 0; 2976 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2977 } 2978 2979 if (auth_xform) { 2980 session->auth_key.data = rte_zmalloc(NULL, 2981 auth_xform->key.length, 2982 RTE_CACHE_LINE_SIZE); 2983 if (session->auth_key.data == NULL && 2984 auth_xform->key.length > 0) { 2985 DPAA2_SEC_ERR("No Memory for auth key"); 2986 return -ENOMEM; 2987 } 2988 session->auth_key.length = auth_xform->key.length; 2989 memcpy(session->auth_key.data, auth_xform->key.data, 2990 auth_xform->key.length); 2991 session->auth_alg = auth_xform->algo; 2992 session->digest_length = auth_xform->digest_length; 2993 } else { 2994 session->auth_key.data = NULL; 2995 session->auth_key.length = 0; 2996 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2997 } 2998 2999 authdata->key = (size_t)session->auth_key.data; 3000 authdata->keylen = session->auth_key.length; 3001 authdata->key_enc_flags = 0; 3002 authdata->key_type = RTA_DATA_IMM; 3003 switch (session->auth_alg) { 3004 case RTE_CRYPTO_AUTH_SHA1_HMAC: 3005 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 3006 authdata->algmode = OP_ALG_AAI_HMAC; 3007 break; 3008 case RTE_CRYPTO_AUTH_MD5_HMAC: 3009 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 3010 authdata->algmode = OP_ALG_AAI_HMAC; 3011 break; 3012 case RTE_CRYPTO_AUTH_SHA224_HMAC: 3013 authdata->algmode = OP_ALG_AAI_HMAC; 3014 if (session->digest_length == 6) 3015 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96; 3016 else if (session->digest_length == 14) 3017 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224; 3018 else 3019 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112; 3020 break; 3021 case RTE_CRYPTO_AUTH_SHA256_HMAC: 3022 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 3023 authdata->algmode = OP_ALG_AAI_HMAC; 3024 if (session->digest_length != 16) 3025 DPAA2_SEC_WARN( 3026 "+++Using sha256-hmac truncated len is non-standard," 3027 "it will not work with lookaside proto"); 3028 break; 3029 case RTE_CRYPTO_AUTH_SHA384_HMAC: 3030 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 3031 authdata->algmode = OP_ALG_AAI_HMAC; 3032 break; 3033 case RTE_CRYPTO_AUTH_SHA512_HMAC: 3034 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 3035 authdata->algmode = OP_ALG_AAI_HMAC; 3036 break; 3037 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 3038 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96; 3039 authdata->algmode = OP_ALG_AAI_XCBC_MAC; 3040 break; 3041 case RTE_CRYPTO_AUTH_AES_CMAC: 3042 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 3043 authdata->algmode = OP_ALG_AAI_CMAC; 3044 break; 3045 case RTE_CRYPTO_AUTH_NULL: 3046 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 3047 break; 3048 
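	/* Anything not mapped above has no lookaside-IPsec protocol
	 * descriptor equivalent, so it is rejected outright.
	 */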
	default:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %s (%u)",
			rte_cryptodev_get_auth_algo_string(session->auth_alg),
			session->auth_alg);
		return -ENOTSUP;
	}
	cipherdata->key = (size_t)session->cipher_key.data;
	cipherdata->keylen = session->cipher_key.length;
	cipherdata->key_enc_flags = 0;
	cipherdata->key_type = RTA_DATA_IMM;

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_3DES;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_DES;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
		cipherdata->algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata->algtype = OP_PCL_IPSEC_NULL;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)",
			rte_cryptodev_get_cipher_algo_string(session->cipher_alg),
			session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}

static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
			    struct rte_security_session_conf *conf,
			    void *sess)
{
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
	struct ctxt_priv *priv;
	struct alginfo authdata, cipherdata;
	int bufsize;
	struct sec_flow_context *flc;
	uint64_t flc_iova;
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	/** Align the FLC address for stashing; the low 6 bits of the
	 * address are used to control stashing.
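	 * Allocating the context DPAA2_STASHING_ALIGN_SIZE-aligned keeps
	 * those low bits clear so they can carry the stashing hints set
	 * via dpaa2_flc_stashing_set() further down.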
3112 */ 3113 priv = rte_zmalloc(NULL, sizeof(struct ctxt_priv) + 3114 sizeof(struct sec_flc_desc), 3115 DPAA2_STASHING_ALIGN_SIZE); 3116 3117 if (priv == NULL) { 3118 DPAA2_SEC_ERR("No memory for priv CTXT"); 3119 return -ENOMEM; 3120 } 3121 3122 flc = &priv->flc_desc[0].flc; 3123 3124 if (ipsec_xform->life.bytes_hard_limit != 0 || 3125 ipsec_xform->life.bytes_soft_limit != 0 || 3126 ipsec_xform->life.packets_hard_limit != 0 || 3127 ipsec_xform->life.packets_soft_limit != 0) { 3128 rte_free(priv); 3129 return -ENOTSUP; 3130 } 3131 3132 memset(session, 0, sizeof(dpaa2_sec_session)); 3133 3134 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3135 cipher_xform = &conf->crypto_xform->cipher; 3136 if (conf->crypto_xform->next) 3137 auth_xform = &conf->crypto_xform->next->auth; 3138 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3139 session, &cipherdata, &authdata); 3140 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3141 auth_xform = &conf->crypto_xform->auth; 3142 if (conf->crypto_xform->next) 3143 cipher_xform = &conf->crypto_xform->next->cipher; 3144 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3145 session, &cipherdata, &authdata); 3146 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 3147 aead_xform = &conf->crypto_xform->aead; 3148 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 3149 session, &cipherdata); 3150 authdata.keylen = 0; 3151 authdata.algtype = 0; 3152 } else { 3153 DPAA2_SEC_ERR("XFORM not specified"); 3154 ret = -EINVAL; 3155 goto out; 3156 } 3157 if (ret) { 3158 DPAA2_SEC_ERR("Failed to process xform"); 3159 goto out; 3160 } 3161 3162 session->ctxt_type = DPAA2_SEC_IPSEC; 3163 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 3164 uint8_t hdr[48] = {}; 3165 struct rte_ipv4_hdr *ip4_hdr; 3166 struct rte_ipv6_hdr *ip6_hdr; 3167 struct ipsec_encap_pdb encap_pdb; 3168 3169 flc->dhr = SEC_FLC_DHR_OUTBOUND; 3170 /* For Sec Proto only one descriptor is required. 
*/ 3171 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 3172 3173 /* copy algo specific data to PDB */ 3174 switch (cipherdata.algtype) { 3175 case OP_PCL_IPSEC_AES_CTR: 3176 encap_pdb.ctr.ctr_initial = 0x00000001; 3177 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3178 break; 3179 case OP_PCL_IPSEC_AES_GCM8: 3180 case OP_PCL_IPSEC_AES_GCM12: 3181 case OP_PCL_IPSEC_AES_GCM16: 3182 memcpy(encap_pdb.gcm.salt, 3183 (uint8_t *)&(ipsec_xform->salt), 4); 3184 break; 3185 } 3186 3187 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 3188 PDBOPTS_ESP_OIHI_PDB_INL | 3189 PDBHMO_ESP_SNR; 3190 3191 if (ipsec_xform->options.iv_gen_disable == 0) 3192 encap_pdb.options |= PDBOPTS_ESP_IVSRC; 3193 /* Initializing the sequence number to 1, Security 3194 * engine will choose this sequence number for first packet 3195 * Refer: RFC4303 section: 3.3.3.Sequence Number Generation 3196 */ 3197 encap_pdb.seq_num = 1; 3198 if (ipsec_xform->options.esn) { 3199 encap_pdb.options |= PDBOPTS_ESP_ESN; 3200 encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3201 encap_pdb.seq_num = conf->ipsec.esn.low; 3202 } 3203 if (ipsec_xform->options.copy_dscp) 3204 encap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3205 if (ipsec_xform->options.ecn) 3206 encap_pdb.options |= PDBOPTS_ESP_TECN; 3207 encap_pdb.spi = ipsec_xform->spi; 3208 session->dir = DIR_ENC; 3209 if (ipsec_xform->tunnel.type == 3210 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3211 if (ipsec_xform->options.dec_ttl) 3212 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 3213 if (ipsec_xform->options.copy_df) 3214 encap_pdb.options |= PDBHMO_ESP_DFBIT; 3215 ip4_hdr = (struct rte_ipv4_hdr *)hdr; 3216 3217 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr); 3218 ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF; 3219 ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ? 3220 ipsec_xform->tunnel.ipv4.ttl : 0x40; 3221 ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp<<2); 3222 3223 ip4_hdr->hdr_checksum = 0; 3224 ip4_hdr->packet_id = 0; 3225 if (ipsec_xform->tunnel.ipv4.df) { 3226 uint16_t frag_off = 0; 3227 3228 frag_off |= RTE_IPV4_HDR_DF_FLAG; 3229 ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off); 3230 } else 3231 ip4_hdr->fragment_offset = 0; 3232 3233 memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip, 3234 sizeof(struct in_addr)); 3235 memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip, 3236 sizeof(struct in_addr)); 3237 if (ipsec_xform->options.udp_encap) { 3238 uint16_t sport, dport; 3239 struct rte_udp_hdr *uh = 3240 (struct rte_udp_hdr *) (hdr + 3241 sizeof(struct rte_ipv4_hdr)); 3242 3243 sport = ipsec_xform->udp.sport ? 3244 ipsec_xform->udp.sport : 4500; 3245 dport = ipsec_xform->udp.dport ? 
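				/* 4500 is the IANA port for UDP-encapsulated
				 * ESP (NAT-T, RFC 3948), used when no port
				 * is specified.
				 */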
3246 ipsec_xform->udp.dport : 4500; 3247 uh->src_port = rte_cpu_to_be_16(sport); 3248 uh->dst_port = rte_cpu_to_be_16(dport); 3249 uh->dgram_len = 0; 3250 uh->dgram_cksum = 0; 3251 3252 ip4_hdr->next_proto_id = IPPROTO_UDP; 3253 ip4_hdr->total_length = 3254 rte_cpu_to_be_16( 3255 sizeof(struct rte_ipv4_hdr) + 3256 sizeof(struct rte_udp_hdr)); 3257 encap_pdb.ip_hdr_len += 3258 sizeof(struct rte_udp_hdr); 3259 encap_pdb.options |= 3260 PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; 3261 } else { 3262 ip4_hdr->total_length = 3263 rte_cpu_to_be_16( 3264 sizeof(struct rte_ipv4_hdr)); 3265 ip4_hdr->next_proto_id = IPPROTO_ESP; 3266 } 3267 3268 ip4_hdr->hdr_checksum = calc_chksum((uint16_t *) 3269 (void *)ip4_hdr, sizeof(struct rte_ipv4_hdr)); 3270 3271 } else if (ipsec_xform->tunnel.type == 3272 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 3273 ip6_hdr = (struct rte_ipv6_hdr *)hdr; 3274 3275 ip6_hdr->vtc_flow = rte_cpu_to_be_32( 3276 DPAA2_IPv6_DEFAULT_VTC_FLOW | 3277 ((ipsec_xform->tunnel.ipv6.dscp << 3278 RTE_IPV6_HDR_TC_SHIFT) & 3279 RTE_IPV6_HDR_TC_MASK) | 3280 ((ipsec_xform->tunnel.ipv6.flabel << 3281 RTE_IPV6_HDR_FL_SHIFT) & 3282 RTE_IPV6_HDR_FL_MASK)); 3283 /* Payload length will be updated by HW */ 3284 ip6_hdr->payload_len = 0; 3285 ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ? 3286 ipsec_xform->tunnel.ipv6.hlimit : 0x40; 3287 ip6_hdr->proto = (ipsec_xform->proto == 3288 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 3289 IPPROTO_ESP : IPPROTO_AH; 3290 memcpy(&ip6_hdr->src_addr, 3291 &ipsec_xform->tunnel.ipv6.src_addr, 16); 3292 memcpy(&ip6_hdr->dst_addr, 3293 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 3294 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 3295 } 3296 3297 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 3298 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 
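			/* SHR_WAIT descriptor sharing is used on SEC era 10
			 * and newer; older eras fall back to SHR_SERIAL.
			 */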
3299 SHR_WAIT : SHR_SERIAL, &encap_pdb, 3300 hdr, &cipherdata, &authdata); 3301 } else if (ipsec_xform->direction == 3302 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 3303 struct ipsec_decap_pdb decap_pdb; 3304 3305 flc->dhr = SEC_FLC_DHR_INBOUND; 3306 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 3307 /* copy algo specific data to PDB */ 3308 switch (cipherdata.algtype) { 3309 case OP_PCL_IPSEC_AES_CTR: 3310 decap_pdb.ctr.ctr_initial = 0x00000001; 3311 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3312 break; 3313 case OP_PCL_IPSEC_AES_GCM8: 3314 case OP_PCL_IPSEC_AES_GCM12: 3315 case OP_PCL_IPSEC_AES_GCM16: 3316 memcpy(decap_pdb.gcm.salt, 3317 (uint8_t *)&(ipsec_xform->salt), 4); 3318 break; 3319 } 3320 3321 if (ipsec_xform->tunnel.type == 3322 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3323 decap_pdb.options = sizeof(struct ip) << 16; 3324 if (ipsec_xform->options.copy_df) 3325 decap_pdb.options |= PDBHMO_ESP_DFV; 3326 if (ipsec_xform->options.dec_ttl) 3327 decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL; 3328 } else { 3329 decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; 3330 } 3331 if (ipsec_xform->options.esn) { 3332 decap_pdb.options |= PDBOPTS_ESP_ESN; 3333 decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3334 decap_pdb.seq_num = conf->ipsec.esn.low; 3335 } 3336 if (ipsec_xform->options.copy_dscp) 3337 decap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3338 if (ipsec_xform->options.ecn) 3339 decap_pdb.options |= PDBOPTS_ESP_TECN; 3340 3341 if (ipsec_xform->replay_win_sz) { 3342 uint32_t win_sz; 3343 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3344 3345 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3346 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3347 win_sz = 128; 3348 } 3349 switch (win_sz) { 3350 case 1: 3351 case 2: 3352 case 4: 3353 case 8: 3354 case 16: 3355 case 32: 3356 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3357 break; 3358 case 64: 3359 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3360 break; 3361 case 256: 3362 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3363 break; 3364 case 512: 3365 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3366 break; 3367 case 1024: 3368 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3369 break; 3370 case 128: 3371 default: 3372 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3373 } 3374 } 3375 session->dir = DIR_DEC; 3376 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3377 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3378 SHR_WAIT : SHR_SERIAL, 3379 &decap_pdb, &cipherdata, &authdata); 3380 } else { 3381 ret = -EINVAL; 3382 goto out; 3383 } 3384 3385 if (bufsize < 0) { 3386 ret = -EINVAL; 3387 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3388 goto out; 3389 } 3390 3391 flc->word1_sdl = (uint8_t)bufsize; 3392 3393 flc_iova = DPAA2_VADDR_TO_IOVA(flc); 3394 /* Enable the stashing control bit and data stashing only.*/ 3395 DPAA2_SET_FLC_RSC(flc); 3396 dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1, 3397 &flc_iova); 3398 flc->word2_rflc_31_0 = lower_32_bits(flc_iova); 3399 flc->word3_rflc_63_32 = upper_32_bits(flc_iova); 3400 3401 /* Set EWS bit i.e. 
enable write-safe */ 3402 DPAA2_SET_FLC_EWS(flc); 3403 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3404 DPAA2_SET_FLC_REUSE_BS(flc); 3405 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3406 DPAA2_SET_FLC_REUSE_FF(flc); 3407 3408 session->ctxt = priv; 3409 3410 return 0; 3411 out: 3412 rte_free(session->auth_key.data); 3413 rte_free(session->cipher_key.data); 3414 rte_free(priv); 3415 return ret; 3416 } 3417 3418 static int 3419 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3420 struct rte_security_session_conf *conf, 3421 void *sess) 3422 { 3423 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3424 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3425 struct rte_crypto_auth_xform *auth_xform = NULL; 3426 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3427 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3428 struct ctxt_priv *priv; 3429 struct alginfo authdata, cipherdata; 3430 struct alginfo *p_authdata = NULL; 3431 int bufsize = -1; 3432 struct sec_flow_context *flc; 3433 uint64_t flc_iova; 3434 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3435 int swap = true; 3436 #else 3437 int swap = false; 3438 #endif 3439 3440 PMD_INIT_FUNC_TRACE(); 3441 3442 RTE_SET_USED(dev); 3443 3444 memset(session, 0, sizeof(dpaa2_sec_session)); 3445 3446 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3447 sizeof(struct ctxt_priv) + 3448 sizeof(struct sec_flc_desc), 3449 RTE_CACHE_LINE_SIZE); 3450 3451 if (priv == NULL) { 3452 DPAA2_SEC_ERR("No memory for priv CTXT"); 3453 return -ENOMEM; 3454 } 3455 3456 flc = &priv->flc_desc[0].flc; 3457 3458 /* find xfrm types */ 3459 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3460 cipher_xform = &xform->cipher; 3461 if (xform->next != NULL && 3462 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3463 session->ext_params.aead_ctxt.auth_cipher_text = true; 3464 auth_xform = &xform->next->auth; 3465 } 3466 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3467 auth_xform = &xform->auth; 3468 if (xform->next != NULL && 3469 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3470 session->ext_params.aead_ctxt.auth_cipher_text = false; 3471 cipher_xform = &xform->next->cipher; 3472 } 3473 } else { 3474 DPAA2_SEC_ERR("Invalid crypto type"); 3475 rte_free(priv); 3476 return -EINVAL; 3477 } 3478 3479 session->ctxt_type = DPAA2_SEC_PDCP; 3480 if (cipher_xform) { 3481 session->cipher_key.data = rte_zmalloc(NULL, 3482 cipher_xform->key.length, 3483 RTE_CACHE_LINE_SIZE); 3484 if (session->cipher_key.data == NULL && 3485 cipher_xform->key.length > 0) { 3486 DPAA2_SEC_ERR("No Memory for cipher key"); 3487 rte_free(priv); 3488 return -ENOMEM; 3489 } 3490 session->cipher_key.length = cipher_xform->key.length; 3491 memcpy(session->cipher_key.data, cipher_xform->key.data, 3492 cipher_xform->key.length); 3493 session->dir = 3494 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
			DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* The HFN override (hfn_ovd) offset is stored in the iv.offset value */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		goto out;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			rte_free(priv);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
		authdata.algtype = PDCP_AUTH_TYPE_NULL;
	}
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	if (session->auth_alg) {
		switch (session->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
				      session->auth_alg);
			goto out;
		}
		p_authdata = &authdata;
	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
		goto out;
	}

	if (pdcp_xform->sdap_enabled) {
		int nb_keys_to_inline =
			rta_inline_pdcp_sdap_query(authdata.algtype,
						   cipherdata.algtype,
						   session->pdcp.sn_size,
3604 session->pdcp.hfn_ovd); 3605 if (nb_keys_to_inline >= 1) { 3606 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3607 cipherdata.key_type = RTA_DATA_PTR; 3608 } 3609 if (nb_keys_to_inline >= 2) { 3610 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 3611 authdata.key_type = RTA_DATA_PTR; 3612 } 3613 } else { 3614 if (rta_inline_pdcp_query(authdata.algtype, 3615 cipherdata.algtype, 3616 session->pdcp.sn_size, 3617 session->pdcp.hfn_ovd)) { 3618 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3619 cipherdata.key_type = RTA_DATA_PTR; 3620 } 3621 } 3622 3623 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3624 if (session->dir == DIR_ENC) 3625 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3626 priv->flc_desc[0].desc, 1, swap, 3627 pdcp_xform->hfn, 3628 session->pdcp.sn_size, 3629 pdcp_xform->bearer, 3630 pdcp_xform->pkt_dir, 3631 pdcp_xform->hfn_threshold, 3632 &cipherdata, &authdata); 3633 else if (session->dir == DIR_DEC) 3634 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3635 priv->flc_desc[0].desc, 1, swap, 3636 pdcp_xform->hfn, 3637 session->pdcp.sn_size, 3638 pdcp_xform->bearer, 3639 pdcp_xform->pkt_dir, 3640 pdcp_xform->hfn_threshold, 3641 &cipherdata, &authdata); 3642 3643 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 3644 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc, 3645 1, swap, &authdata); 3646 } else { 3647 if (session->dir == DIR_ENC) { 3648 if (pdcp_xform->sdap_enabled) 3649 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap( 3650 priv->flc_desc[0].desc, 1, swap, 3651 session->pdcp.sn_size, 3652 pdcp_xform->hfn, 3653 pdcp_xform->bearer, 3654 pdcp_xform->pkt_dir, 3655 pdcp_xform->hfn_threshold, 3656 &cipherdata, p_authdata); 3657 else 3658 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3659 priv->flc_desc[0].desc, 1, swap, 3660 session->pdcp.sn_size, 3661 pdcp_xform->hfn, 3662 pdcp_xform->bearer, 3663 pdcp_xform->pkt_dir, 3664 pdcp_xform->hfn_threshold, 3665 &cipherdata, p_authdata); 3666 } else if (session->dir == DIR_DEC) { 3667 if (pdcp_xform->sdap_enabled) 3668 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap( 3669 priv->flc_desc[0].desc, 1, swap, 3670 session->pdcp.sn_size, 3671 pdcp_xform->hfn, 3672 pdcp_xform->bearer, 3673 pdcp_xform->pkt_dir, 3674 pdcp_xform->hfn_threshold, 3675 &cipherdata, p_authdata); 3676 else 3677 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3678 priv->flc_desc[0].desc, 1, swap, 3679 session->pdcp.sn_size, 3680 pdcp_xform->hfn, 3681 pdcp_xform->bearer, 3682 pdcp_xform->pkt_dir, 3683 pdcp_xform->hfn_threshold, 3684 &cipherdata, p_authdata); 3685 } 3686 } 3687 3688 if (bufsize < 0) { 3689 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3690 goto out; 3691 } 3692 3693 flc_iova = DPAA2_VADDR_TO_IOVA(flc); 3694 /* Enable the stashing control bit and data stashing only.*/ 3695 DPAA2_SET_FLC_RSC(flc); 3696 dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1, 3697 &flc_iova); 3698 flc->word2_rflc_31_0 = lower_32_bits(flc_iova); 3699 flc->word3_rflc_63_32 = upper_32_bits(flc_iova); 3700 3701 flc->word1_sdl = (uint8_t)bufsize; 3702 3703 /* TODO - check the perf impact or 3704 * align as per descriptor type 3705 * Set EWS bit i.e. 
enable write-safe 3706 * DPAA2_SET_FLC_EWS(flc); 3707 */ 3708 3709 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3710 DPAA2_SET_FLC_REUSE_BS(flc); 3711 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3712 DPAA2_SET_FLC_REUSE_FF(flc); 3713 3714 session->ctxt = priv; 3715 3716 return 0; 3717 out: 3718 rte_free(session->auth_key.data); 3719 rte_free(session->cipher_key.data); 3720 rte_free(priv); 3721 return -EINVAL; 3722 } 3723 3724 static int 3725 dpaa2_sec_security_session_create(void *dev, 3726 struct rte_security_session_conf *conf, 3727 struct rte_security_session *sess) 3728 { 3729 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3730 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3731 int ret; 3732 3733 switch (conf->protocol) { 3734 case RTE_SECURITY_PROTOCOL_IPSEC: 3735 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3736 sess_private_data); 3737 break; 3738 case RTE_SECURITY_PROTOCOL_MACSEC: 3739 return -ENOTSUP; 3740 case RTE_SECURITY_PROTOCOL_PDCP: 3741 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3742 sess_private_data); 3743 break; 3744 default: 3745 return -EINVAL; 3746 } 3747 if (ret != 0) { 3748 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3749 return ret; 3750 } 3751 3752 return ret; 3753 } 3754 3755 /** Clear the memory of session so it doesn't leave key material behind */ 3756 static int 3757 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3758 struct rte_security_session *sess) 3759 { 3760 PMD_INIT_FUNC_TRACE(); 3761 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 3762 3763 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3764 3765 if (sess_priv) { 3766 rte_free(s->ctxt); 3767 rte_free(s->cipher_key.data); 3768 rte_free(s->auth_key.data); 3769 memset(s, 0, sizeof(dpaa2_sec_session)); 3770 } 3771 return 0; 3772 } 3773 3774 static int 3775 dpaa2_sec_security_session_update(void *dev, 3776 struct rte_security_session *sess, 3777 struct rte_security_session_conf *conf) 3778 { 3779 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3780 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3781 int ret; 3782 3783 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC && 3784 conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 3785 return -ENOTSUP; 3786 3787 dpaa2_sec_security_session_destroy(dev, sess); 3788 3789 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3790 sess_private_data); 3791 if (ret != 0) { 3792 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3793 return ret; 3794 } 3795 3796 return ret; 3797 } 3798 3799 static unsigned int 3800 dpaa2_sec_security_session_get_size(void *device __rte_unused) 3801 { 3802 return sizeof(dpaa2_sec_session); 3803 } 3804 3805 static int 3806 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused, 3807 struct rte_crypto_sym_xform *xform, 3808 struct rte_cryptodev_sym_session *sess) 3809 { 3810 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3811 int ret; 3812 3813 ret = dpaa2_sec_set_session_parameters(xform, sess_private_data); 3814 if (ret != 0) { 3815 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3816 /* Return session to mempool */ 3817 return ret; 3818 } 3819 3820 return 0; 3821 } 3822 3823 /** Clear the memory of session so it doesn't leave key material behind */ 3824 static void 3825 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused, 3826 struct rte_cryptodev_sym_session *sess) 3827 { 3828 PMD_INIT_FUNC_TRACE(); 3829 dpaa2_sec_session *s = 
3805 static int 3806 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused, 3807 struct rte_crypto_sym_xform *xform, 3808 struct rte_cryptodev_sym_session *sess) 3809 { 3810 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3811 int ret; 3812 3813 ret = dpaa2_sec_set_session_parameters(xform, sess_private_data); 3814 if (ret != 0) { 3815 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3816 /* Return session to mempool */ 3817 return ret; 3818 } 3819 3820 return 0; 3821 } 3822 3823 /** Clear the memory of session so it doesn't leave key material behind */ 3824 static void 3825 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused, 3826 struct rte_cryptodev_sym_session *sess) 3827 { 3828 PMD_INIT_FUNC_TRACE(); 3829 dpaa2_sec_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3830 3831 if (s) { 3832 rte_free(s->ctxt); 3833 rte_free(s->cipher_key.data); 3834 rte_free(s->auth_key.data); 3835 memset(s, 0, sizeof(dpaa2_sec_session)); } 3836 } 3837 3838 static int 3839 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3840 struct rte_cryptodev_config *config __rte_unused) 3841 { 3842 PMD_INIT_FUNC_TRACE(); 3843 3844 return 0; 3845 } 3846 3847 static int 3848 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3849 { 3850 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3851 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3852 struct dpseci_attr attr; 3853 struct dpaa2_queue *dpaa2_q; 3854 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3855 dev->data->queue_pairs; 3856 struct dpseci_rx_queue_attr rx_attr; 3857 struct dpseci_tx_queue_attr tx_attr; 3858 int ret, i; 3859 3860 PMD_INIT_FUNC_TRACE(); 3861 3862 /* Change the tx burst function if ordered queues are used */ 3863 if (priv->en_ordered) 3864 dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered; 3865 3866 memset(&attr, 0, sizeof(struct dpseci_attr)); 3867 3868 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3869 if (ret) { 3870 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3871 priv->hw_id); 3872 goto get_attr_failure; 3873 } 3874 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3875 if (ret) { 3876 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3877 goto get_attr_failure; 3878 } 3879 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3880 dpaa2_q = &qp[i]->rx_vq; 3881 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3882 &rx_attr); 3883 dpaa2_q->fqid = rx_attr.fqid; 3884 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3885 } 3886 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3887 dpaa2_q = &qp[i]->tx_vq; 3888 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3889 &tx_attr); 3890 dpaa2_q->fqid = tx_attr.fqid; 3891 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3892 } 3893 3894 return 0; 3895 get_attr_failure: 3896 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3897 return -1; 3898 } 3899 3900 static void 3901 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3902 { 3903 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3904 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3905 int ret; 3906 3907 PMD_INIT_FUNC_TRACE(); 3908 3909 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3910 if (ret) { 3911 DPAA2_SEC_ERR("Failure in disabling dpseci device %d", 3912 priv->hw_id); 3913 return; 3914 } 3915 3916 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3917 if (ret < 0) { 3918 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret); 3919 return; 3920 } 3921 } 3922 3923 static int 3924 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused) 3925 { 3926 PMD_INIT_FUNC_TRACE(); 3927 3928 return 0; 3929 } 3930 3931 static void 3932 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3933 struct rte_cryptodev_info *info) 3934 { 3935 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3936 3937 PMD_INIT_FUNC_TRACE(); 3938 if (info != NULL) { 3939 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3940 info->feature_flags = dev->feature_flags; 3941 info->capabilities = dpaa2_sec_capabilities; 3942 /* No limit on the number of sessions */ 3943 info->sym.max_nb_sessions = 0; 3944 info->driver_id = cryptodev_driver_id; 3945 } 3946 } 3947 3948 static 3949 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3950 struct rte_cryptodev_stats *stats) 3951 { 3952 struct
dpaa2_sec_dev_private *priv = dev->data->dev_private; 3953 struct fsl_mc_io dpseci; 3954 struct dpseci_sec_counters counters = {0}; 3955 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3956 dev->data->queue_pairs; 3957 int ret, i; 3958 3959 PMD_INIT_FUNC_TRACE(); 3960 if (stats == NULL) { 3961 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3962 return; 3963 } 3964 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3965 if (qp == NULL || qp[i] == NULL) { 3966 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3967 continue; 3968 } 3969 3970 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3971 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3972 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3973 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3974 } 3975 3976 /* If a secondary process accesses the stats, the MCP portal in 3977 * priv->hw may hold the primary process address. Use this process's 3978 * own MCP portal address for this object instead. 3979 */ 3980 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3981 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3982 &counters); 3983 if (ret) { 3984 DPAA2_SEC_ERR("Reading SEC counters failed"); 3985 } else { 3986 DPAA2_SEC_INFO("dpseci hardware stats:"); 3987 DPAA2_SEC_INFO("\tNum of Requests Dequeued = %" PRIu64, 3988 counters.dequeued_requests); 3989 DPAA2_SEC_INFO("\tNum of Outbound Encrypt Requests = %" PRIu64, 3990 counters.ob_enc_requests); 3991 DPAA2_SEC_INFO("\tNum of Inbound Decrypt Requests = %" PRIu64, 3992 counters.ib_dec_requests); 3993 DPAA2_SEC_INFO("\tNum of Outbound Bytes Encrypted = %" PRIu64, 3994 counters.ob_enc_bytes); 3995 DPAA2_SEC_INFO("\tNum of Outbound Bytes Protected = %" PRIu64, 3996 counters.ob_prot_bytes); 3997 DPAA2_SEC_INFO("\tNum of Inbound Bytes Decrypted = %" PRIu64, 3998 counters.ib_dec_bytes); 3999 DPAA2_SEC_INFO("\tNum of Inbound Bytes Validated = %" PRIu64, 4000 counters.ib_valid_bytes); 4001 } 4002 } 4003 4004 static 4005 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 4006 { 4007 int i; 4008 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 4009 (dev->data->queue_pairs); 4010 4011 PMD_INIT_FUNC_TRACE(); 4012 4013 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 4014 if (qp == NULL || qp[i] == NULL) { 4015 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 4016 continue; 4017 } 4018 qp[i]->tx_vq.rx_pkts = 0; 4019 qp[i]->tx_vq.tx_pkts = 0; 4020 qp[i]->tx_vq.err_pkts = 0; 4021 qp[i]->rx_vq.rx_pkts = 0; 4022 qp[i]->rx_vq.tx_pkts = 0; 4023 qp[i]->rx_vq.err_pkts = 0; 4024 } 4025 } 4026
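/*
 * Illustrative usage sketch (not part of the driver, compiled out):
 * polling and resetting the counters exposed by dpaa2_sec_stats_get()
 * and dpaa2_sec_stats_reset() through the generic cryptodev API. The
 * device id is an assumption made for the example only.
 */
#if 0
static void
example_dump_and_reset_stats(uint8_t cdev_id)
{
	struct rte_cryptodev_stats stats;

	/* Reaches dpaa2_sec_stats_get(); also logs the DPSECI HW counters */
	if (rte_cryptodev_stats_get(cdev_id, &stats) == 0)
		printf("enq %" PRIu64 " (err %" PRIu64 "), deq %" PRIu64
		       " (err %" PRIu64 ")\n",
		       stats.enqueued_count, stats.enqueue_err_count,
		       stats.dequeued_count, stats.dequeue_err_count);

	/* Reaches dpaa2_sec_stats_reset() */
	rte_cryptodev_stats_reset(cdev_id);
}
#endif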
4027 static void __rte_hot 4028 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 4029 const struct qbman_fd *fd, 4030 const struct qbman_result *dq, 4031 struct dpaa2_queue *rxq, 4032 struct rte_event *ev) 4033 { 4034 struct dpaa2_sec_qp *qp; 4035 4036 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4037 ev->flow_id = rxq->ev.flow_id; 4038 ev->sub_event_type = rxq->ev.sub_event_type; 4039 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4040 ev->op = RTE_EVENT_OP_NEW; 4041 ev->sched_type = rxq->ev.sched_type; 4042 ev->queue_id = rxq->ev.queue_id; 4043 ev->priority = rxq->ev.priority; 4044 ev->event_ptr = sec_fd_to_mbuf(fd, qp); 4045 4046 qbman_swp_dqrr_consume(swp, dq); 4047 } 4048 4049 static void __rte_hot 4050 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 4051 const struct qbman_fd *fd, 4052 const struct qbman_result *dq, 4053 struct dpaa2_queue *rxq, 4054 struct rte_event *ev) 4055 { 4056 uint8_t dqrr_index; 4057 struct dpaa2_sec_qp *qp; 4058 struct rte_crypto_op *crypto_op; 4059 4060 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4061 ev->flow_id = rxq->ev.flow_id; 4062 ev->sub_event_type = rxq->ev.sub_event_type; 4063 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4064 ev->op = RTE_EVENT_OP_NEW; 4065 ev->sched_type = rxq->ev.sched_type; 4066 ev->queue_id = rxq->ev.queue_id; 4067 ev->priority = rxq->ev.priority; 4068 4069 crypto_op = sec_fd_to_mbuf(fd, qp); 4070 dqrr_index = qbman_get_dqrr_idx(dq); 4071 *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 4072 DPAA2_PER_LCORE_DQRR_SIZE++; 4073 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 4074 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 4075 ev->event_ptr = crypto_op; 4076 } 4077 4078 static void __rte_hot 4079 dpaa2_sec_process_ordered_event(struct qbman_swp *swp, 4080 const struct qbman_fd *fd, 4081 const struct qbman_result *dq, 4082 struct dpaa2_queue *rxq, 4083 struct rte_event *ev) 4084 { 4085 struct rte_crypto_op *crypto_op; 4086 struct dpaa2_sec_qp *qp; 4087 4088 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4089 ev->flow_id = rxq->ev.flow_id; 4090 ev->sub_event_type = rxq->ev.sub_event_type; 4091 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4092 ev->op = RTE_EVENT_OP_NEW; 4093 ev->sched_type = rxq->ev.sched_type; 4094 ev->queue_id = rxq->ev.queue_id; 4095 ev->priority = rxq->ev.priority; 4096 crypto_op = sec_fd_to_mbuf(fd, qp); 4097 4098 *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP; 4099 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) << 4100 DPAA2_EQCR_OPRID_SHIFT; 4101 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) << 4102 DPAA2_EQCR_SEQNUM_SHIFT; 4103 4104 qbman_swp_dqrr_consume(swp, dq); 4105 ev->event_ptr = crypto_op; 4106 } 4107 4108 int 4109 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 4110 int qp_id, 4111 struct dpaa2_dpcon_dev *dpcon, 4112 const struct rte_event *event) 4113 { 4114 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4115 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4116 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 4117 struct dpseci_rx_queue_cfg cfg; 4118 uint8_t priority; 4119 int ret; 4120 4121 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 4122 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 4123 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 4124 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 4125 else if (event->sched_type == RTE_SCHED_TYPE_ORDERED) 4126 qp->rx_vq.cb = dpaa2_sec_process_ordered_event; 4127 else 4128 return -EINVAL; 4129 4130 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 4131 (dpcon->num_priorities - 1); 4132 4133 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4134 cfg.options = DPSECI_QUEUE_OPT_DEST; 4135 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 4136 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 4137 cfg.dest_cfg.priority = priority; 4138 4139 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 4140 cfg.user_ctx = (size_t)(&qp->rx_vq); 4141 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 4142 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 4143 cfg.order_preservation_en = 1; 4144 } 4145 4146 if (event->sched_type == RTE_SCHED_TYPE_ORDERED) { 4147 struct opr_cfg ocfg; 4148 4149 /* Restoration window size = 256 frames */ 4150 ocfg.oprrws = 3; 4151 /* Restoration window size = 512 frames for LX2 */ 4152 if (dpaa2_svr_family == SVR_LX2160A) 4153 ocfg.oprrws = 4; 4154 /* Auto advance NESN window enabled */ 4155 ocfg.oa = 1; 4156 /* Late arrival window size disabled */ 4157 ocfg.olws = 0; 4158 /* ORL resource exhaustion advance NESN
disabled */ 4159 ocfg.oeane = 0; 4160 4161 if (priv->en_loose_ordered) 4162 ocfg.oloe = 1; 4163 else 4164 ocfg.oloe = 0; 4165 4166 ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token, 4167 qp_id, OPR_OPT_CREATE, &ocfg); 4168 if (ret) { 4169 DPAA2_SEC_ERR("Error setting opr: ret: %d", ret); 4170 return ret; 4171 } 4172 qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf; 4173 priv->en_ordered = 1; 4174 } 4175 4176 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4177 qp_id, &cfg); 4178 if (ret) { 4179 DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret); 4180 return ret; 4181 } 4182 4183 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 4184 4185 return 0; 4186 } 4187 4188 int 4189 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 4190 int qp_id) 4191 { 4192 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4193 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4194 struct dpseci_rx_queue_cfg cfg; 4195 int ret; 4196 4197 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4198 cfg.options = DPSECI_QUEUE_OPT_DEST; 4199 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 4200 4201 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4202 qp_id, &cfg); 4203 if (ret) 4204 DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret); 4205 4206 return ret; 4207 } 4208
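/*
 * Illustrative usage sketch (not part of the driver, compiled out):
 * dpaa2_sec_eventq_attach()/detach() above are driven by the DPAA2
 * event device when a cryptodev queue pair is bound to an event queue
 * through the event crypto adapter. The adapter/device/queue ids,
 * port configuration values and OP_NEW mode are assumptions made for
 * the example only.
 */
#if 0
static int
example_bind_qp_to_eventdev(uint8_t adapter_id, uint8_t evdev_id,
			    uint8_t cdev_id, int32_t qp_id)
{
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 8,
		.enqueue_depth = 8,
	};
	struct rte_event_crypto_adapter_queue_conf qconf = {
		/* Mirrored into qp->rx_vq.ev by dpaa2_sec_eventq_attach() */
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
	};
	int ret;

	ret = rte_event_crypto_adapter_create(adapter_id, evdev_id,
			&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret)
		return ret;

	/* Ends up in dpaa2_sec_eventq_attach() for this PMD */
	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
			qp_id, &qconf);
}
#endif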
4209 static struct rte_cryptodev_ops crypto_ops = { 4210 .dev_configure = dpaa2_sec_dev_configure, 4211 .dev_start = dpaa2_sec_dev_start, 4212 .dev_stop = dpaa2_sec_dev_stop, 4213 .dev_close = dpaa2_sec_dev_close, 4214 .dev_infos_get = dpaa2_sec_dev_infos_get, 4215 .stats_get = dpaa2_sec_stats_get, 4216 .stats_reset = dpaa2_sec_stats_reset, 4217 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 4218 .queue_pair_release = dpaa2_sec_queue_pair_release, 4219 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 4220 .sym_session_configure = dpaa2_sec_sym_session_configure, 4221 .sym_session_clear = dpaa2_sec_sym_session_clear, 4222 /* Raw data-path API related operations */ 4223 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size, 4224 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx, 4225 }; 4226 4227 static const struct rte_security_capability * 4228 dpaa2_sec_capabilities_get(void *device __rte_unused) 4229 { 4230 return dpaa2_sec_security_cap; 4231 } 4232 4233 static const struct rte_security_ops dpaa2_sec_security_ops = { 4234 .session_create = dpaa2_sec_security_session_create, 4235 .session_update = dpaa2_sec_security_session_update, 4236 .session_get_size = dpaa2_sec_security_session_get_size, 4237 .session_stats_get = NULL, 4238 .session_destroy = dpaa2_sec_security_session_destroy, 4239 .set_pkt_metadata = NULL, 4240 .capabilities_get = dpaa2_sec_capabilities_get 4241 }; 4242 4243 static int 4244 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 4245 { 4246 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4247 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4248 int ret; 4249 4250 PMD_INIT_FUNC_TRACE(); 4251 4252 /* This function is the reverse of dpaa2_sec_dev_init. 4253 * It does the following: 4254 * 1. Detach the DPSECI from attached resources, i.e. buffer pools (dpbp_id) 4255 * 2. Close the DPSECI device 4256 * 3. Free the allocated resources. 4257 */ 4258 4259 /* Close the device at the underlying layer */ 4260 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 4261 if (ret) { 4262 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 4263 return -1; 4264 } 4265 4266 /* Free the allocated memory for the security context and dpseci object */ 4267 priv->hw = NULL; 4268 rte_free(dpseci); 4269 rte_free(dev->security_ctx); 4270 4271 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 4272 dev->data->name, rte_socket_id()); 4273 4274 return 0; 4275 } 4276 4277 static int 4278 check_devargs_handler(const char *key, const char *value, 4279 void *opaque) 4280 { 4281 struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque; 4282 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4283 4284 if (!strcmp(key, DRIVER_STRICT_ORDER)) { 4285 priv->en_loose_ordered = false; 4286 } else if (!strcmp(key, DRIVER_DUMP_MODE)) { 4287 dpaa2_sec_dp_dump = atoi(value); 4288 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) { 4289 DPAA2_SEC_WARN("Unsupported drv_dump_mode value %s," 4290 " defaulting to full dump", 4291 value); 4292 dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP; 4293 } 4294 } else 4295 return -1; 4296 4297 return 0; 4298 } 4299 4300 static void 4301 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key) 4302 { 4303 struct rte_kvargs *kvlist; 4304 struct rte_devargs *devargs; 4305 4306 devargs = cryptodev->device->devargs; 4307 if (!devargs) 4308 return; 4309 4310 kvlist = rte_kvargs_parse(devargs->args, NULL); 4311 if (!kvlist) 4312 return; 4313 4314 if (!rte_kvargs_count(kvlist, key)) { 4315 rte_kvargs_free(kvlist); 4316 return; 4317 } 4318 4319 rte_kvargs_process(kvlist, key, 4320 check_devargs_handler, (void *)cryptodev); 4321 rte_kvargs_free(kvlist); 4322 } 4323
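/*
 * Devargs usage note: the two keys handled above are passed per device
 * on the fslmc bus portion of the EAL command line; the object
 * name/index and values below are examples only:
 *
 *   <app> -a fslmc:dpseci.1,drv_dump_mode=1,drv_strict_order=1
 *
 * drv_dump_mode selects one of the DPAA2_SEC_DP_DUMP levels for error
 * dumps; drv_strict_order clears en_loose_ordered for ordered queues.
 */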
4324 static int 4325 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 4326 { 4327 struct dpaa2_sec_dev_private *internals; 4328 struct rte_device *dev = cryptodev->device; 4329 struct rte_dpaa2_device *dpaa2_dev; 4330 struct rte_security_ctx *security_instance; 4331 struct fsl_mc_io *dpseci; 4332 uint16_t token; 4333 struct dpseci_attr attr; 4334 int retcode, hw_id; 4335 4336 PMD_INIT_FUNC_TRACE(); 4337 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 4338 hw_id = dpaa2_dev->object_id; 4339 4340 cryptodev->driver_id = cryptodev_driver_id; 4341 cryptodev->dev_ops = &crypto_ops; 4342 4343 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 4344 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 4345 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 4346 RTE_CRYPTODEV_FF_HW_ACCELERATED | 4347 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 4348 RTE_CRYPTODEV_FF_SECURITY | 4349 RTE_CRYPTODEV_FF_SYM_RAW_DP | 4350 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 4351 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 4352 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 4353 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 4354 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 4355 4356 internals = cryptodev->data->dev_private; 4357 4358 /* 4359 * For secondary processes, we don't initialise any further as the 4360 * primary process has already done this work. 4361 */ 4362 4363 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 4364 DPAA2_SEC_DEBUG("Device already initialised by primary process"); 4365 return 0; 4366 } 4367 4368 /* Initialize security_ctx only for the primary process */ 4369 security_instance = rte_malloc("rte_security_instances_ops", 4370 sizeof(struct rte_security_ctx), 0); 4371 if (security_instance == NULL) 4372 return -ENOMEM; 4373 security_instance->device = (void *)cryptodev; 4374 security_instance->ops = &dpaa2_sec_security_ops; 4375 security_instance->sess_cnt = 0; 4376 cryptodev->security_ctx = security_instance; 4377 4378 /* Open the rte device via MC and save the handle for further use */ 4379 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 4380 sizeof(struct fsl_mc_io), 0); 4381 if (!dpseci) { 4382 DPAA2_SEC_ERR( 4383 "Error in allocating the memory for dpsec object"); 4384 rte_free(security_instance); cryptodev->security_ctx = NULL; return -ENOMEM; 4385 } 4386 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 4387 4388 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 4389 if (retcode != 0) { 4390 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 4391 retcode); 4392 goto init_error; 4393 } 4394 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 4395 if (retcode != 0) { 4396 DPAA2_SEC_ERR( 4397 "Cannot get dpsec device attributes: Error = %x", 4398 retcode); 4399 goto init_error; 4400 } 4401 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 4402 "dpsec-%u", hw_id); 4403 4404 internals->max_nb_queue_pairs = attr.num_tx_queues; 4405 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 4406 internals->hw = dpseci; 4407 internals->token = token; 4408 internals->en_loose_ordered = true; 4409 4410 dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE); 4411 dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER); 4412 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 4413 return 0; 4414 4415 init_error: 4416 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 4417 rte_free(dpseci); 4418 rte_free(security_instance); cryptodev->security_ctx = NULL; 4419 return -EFAULT; 4420 } 4421 4422 static int 4423 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 4424 struct rte_dpaa2_device *dpaa2_dev) 4425 { 4426 struct rte_cryptodev *cryptodev; 4427 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 4428 4429 int retval; 4430 4431 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 4432 dpaa2_dev->object_id); 4433 4434 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 4435 if (cryptodev == NULL) 4436 return -ENOMEM; 4437 4438 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4439 cryptodev->data->dev_private = rte_zmalloc_socket( 4440 "cryptodev private structure", 4441 sizeof(struct dpaa2_sec_dev_private), 4442 RTE_CACHE_LINE_SIZE, 4443 rte_socket_id()); 4444 4445 if (cryptodev->data->dev_private == NULL) 4446 rte_panic("Cannot allocate memzone for private " 4447 "device data"); 4448 } 4449 4450 dpaa2_dev->cryptodev = cryptodev; 4451 cryptodev->device = &dpaa2_dev->device; 4452 4453 /* init user callbacks */ 4454 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 4455 4456 if (dpaa2_svr_family == SVR_LX2160A) 4457 rta_set_sec_era(RTA_SEC_ERA_10); 4458 else 4459 rta_set_sec_era(RTA_SEC_ERA_8); 4460 4461 DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era())); 4462 4463 /* Invoke PMD device initialization function */ 4464 retval = dpaa2_sec_dev_init(cryptodev); 4465 if (retval == 0) { 4466 rte_cryptodev_pmd_probing_finish(cryptodev); 4467 return 0; 4468 } 4469
4470 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4471 rte_free(cryptodev->data->dev_private); 4472 4473 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 4474 4475 return -ENXIO; 4476 } 4477 4478 static int 4479 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 4480 { 4481 struct rte_cryptodev *cryptodev; 4482 int ret; 4483 4484 cryptodev = dpaa2_dev->cryptodev; 4485 if (cryptodev == NULL) 4486 return -ENODEV; 4487 4488 ret = dpaa2_sec_uninit(cryptodev); 4489 if (ret) 4490 return ret; 4491 4492 return rte_cryptodev_pmd_destroy(cryptodev); 4493 } 4494 4495 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 4496 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 4497 .drv_type = DPAA2_CRYPTO, 4498 .driver = { 4499 .name = "DPAA2 SEC PMD" 4500 }, 4501 .probe = cryptodev_dpaa2_sec_probe, 4502 .remove = cryptodev_dpaa2_sec_remove, 4503 }; 4504 4505 static struct cryptodev_driver dpaa2_sec_crypto_drv; 4506 4507 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 4508 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 4509 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 4510 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD, 4511 DRIVER_STRICT_ORDER "=<int> " 4512 DRIVER_DUMP_MODE "=<int>"); 4513 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE); 4514
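/*
 * Illustrative data-path sketch (not part of the driver, compiled out):
 * enqueueing one symmetric crypto operation on this PMD and busy-polling
 * for its completion. The device/queue ids, op pool, an already
 * configured session and the cipher-only layout are assumptions of the
 * example; IV and auth fields are omitted for brevity.
 */
#if 0
static int
example_process_one_op(uint8_t cdev_id, uint16_t qp_id,
		       struct rte_mempool *op_pool, void *sym_sess,
		       struct rte_mbuf *pkt, uint32_t data_len)
{
	struct rte_crypto_op *op, *deq_op;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -ENOMEM;

	op->sym->m_src = pkt;
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = data_len;
	rte_crypto_op_attach_sym_session(op, sym_sess);

	/* dpaa2_sec_enqueue_burst() builds the FD and hands it to QBMAN */
	if (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1) {
		rte_crypto_op_free(op);
		return -EBUSY;
	}

	/* dpaa2_sec_dequeue_burst() pulls the response FD back */
	while (rte_cryptodev_dequeue_burst(cdev_id, qp_id, &deq_op, 1) == 0)
		;

	return deq_op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -EIO;
}
#endif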