1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2023 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_udp.h> 14 #include <rte_mbuf.h> 15 #include <rte_cryptodev.h> 16 #include <rte_malloc.h> 17 #include <rte_memcpy.h> 18 #include <rte_string_fns.h> 19 #include <rte_cycles.h> 20 #include <rte_kvargs.h> 21 #include <dev_driver.h> 22 #include <cryptodev_pmd.h> 23 #include <rte_common.h> 24 #include <bus_fslmc_driver.h> 25 #include <fslmc_vfio.h> 26 #include <dpaa2_hw_pvt.h> 27 #include <dpaa2_hw_dpio.h> 28 #include <dpaa2_hw_mempool.h> 29 #include <fsl_dpopr.h> 30 #include <fsl_dpseci.h> 31 #include <fsl_mc_sys.h> 32 #include <rte_hexdump.h> 33 34 #include "dpaa2_sec_priv.h" 35 #include "dpaa2_sec_event.h" 36 #include "dpaa2_sec_logs.h" 37 38 /* RTA header files */ 39 #include <desc/ipsec.h> 40 #include <desc/pdcp.h> 41 #include <desc/sdap.h> 42 #include <desc/algo.h> 43 44 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 45 * a pointer to the shared descriptor 46 */ 47 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 48 #define FSL_VENDOR_ID 0x1957 49 #define FSL_DEVICE_ID 0x410 50 #define FSL_SUBSYSTEM_SEC 1 51 #define FSL_MC_DPSECI_DEVID 3 52 53 #define NO_PREFETCH 0 54 55 #define DRIVER_DUMP_MODE "drv_dump_mode" 56 #define DRIVER_STRICT_ORDER "drv_strict_order" 57 58 /* DPAA2_SEC_DP_DUMP levels */ 59 enum dpaa2_sec_dump_levels { 60 DPAA2_SEC_DP_NO_DUMP, 61 DPAA2_SEC_DP_ERR_DUMP, 62 DPAA2_SEC_DP_FULL_DUMP 63 }; 64 65 uint8_t cryptodev_driver_id; 66 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP; 67 68 static inline void 69 free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) 70 { 71 struct qbman_fle *fle; 72 struct rte_crypto_op *op; 73 74 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 75 return; 76 77 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 78 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 79 /* free the fle memory */ 80 if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src))) 81 rte_mempool_put(qp->fle_pool, (void *)(fle-1)); 82 else 83 rte_free((void *)(fle-1)); 84 } 85 86 static inline int 87 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 88 struct rte_crypto_op *op, 89 struct qbman_fd *fd, uint16_t bpid) 90 { 91 struct rte_crypto_sym_op *sym_op = op->sym; 92 struct ctxt_priv *priv = sess->ctxt; 93 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 94 struct sec_flow_context *flc; 95 struct rte_mbuf *mbuf; 96 uint32_t in_len = 0, out_len = 0; 97 98 if (sym_op->m_dst) 99 mbuf = sym_op->m_dst; 100 else 101 mbuf = sym_op->m_src; 102 103 /* first FLE entry used to store mbuf and session ctxt */ 104 fle = (struct qbman_fle *)rte_malloc(NULL, 105 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 106 RTE_CACHE_LINE_SIZE); 107 if (unlikely(!fle)) { 108 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 109 return -ENOMEM; 110 } 111 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 112 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 113 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 114 115 /* Save the shared descriptor */ 116 flc = &priv->flc_desc[0].flc; 117 118 op_fle = fle + 1; 119 ip_fle = fle + 2; 120 sge = fle + 3; 121 122 if (likely(bpid < MAX_BPID)) { 123 DPAA2_SET_FD_BPID(fd, bpid); 124 DPAA2_SET_FLE_BPID(op_fle, bpid); 125 DPAA2_SET_FLE_BPID(ip_fle, bpid); 126 } else { 127 DPAA2_SET_FD_IVP(fd); 128 DPAA2_SET_FLE_IVP(op_fle); 129 DPAA2_SET_FLE_IVP(ip_fle); 130 } 131 132 /* Configure FD as a FRAME LIST */ 133 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 134 DPAA2_SET_FD_COMPOUND_FMT(fd); 135 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 136 137 /* Configure Output FLE with Scatter/Gather Entry */ 138 DPAA2_SET_FLE_SG_EXT(op_fle); 139 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 140 141 /* Configure Output SGE for Encap/Decap */ 142 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 143 /* o/p segs */ 144 while (mbuf->next) { 145 sge->length = mbuf->data_len; 146 out_len += sge->length; 147 sge++; 148 mbuf = mbuf->next; 149 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 150 } 151 /* using buf_len for last buf - so that extra data can be added */ 152 sge->length = mbuf->buf_len - mbuf->data_off; 153 out_len += sge->length; 154 155 DPAA2_SET_FLE_FIN(sge); 156 op_fle->length = out_len; 157 158 sge++; 159 mbuf = sym_op->m_src; 160 161 /* Configure Input FLE with Scatter/Gather Entry */ 162 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 163 DPAA2_SET_FLE_SG_EXT(ip_fle); 164 DPAA2_SET_FLE_FIN(ip_fle); 165 166 /* Configure input SGE for Encap/Decap */ 167 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 168 sge->length = mbuf->data_len; 169 in_len += sge->length; 170 171 mbuf = mbuf->next; 172 /* i/p segs */ 173 while (mbuf) { 174 sge++; 175 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 176 sge->length = mbuf->data_len; 177 in_len += sge->length; 178 mbuf = mbuf->next; 179 } 180 ip_fle->length = in_len; 181 DPAA2_SET_FLE_FIN(sge); 182 183 /* In case of PDCP, per packet HFN is stored in 184 * mbuf priv after sym_op. 185 */ 186 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 187 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 188 sess->pdcp.hfn_ovd_offset); 189 /* enable HFN override */ 190 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 191 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 192 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 193 } 194 DPAA2_SET_FD_LEN(fd, ip_fle->length); 195 196 return 0; 197 } 198 199 static inline int 200 build_proto_compound_fd(dpaa2_sec_session *sess, 201 struct rte_crypto_op *op, 202 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 203 { 204 struct rte_crypto_sym_op *sym_op = op->sym; 205 struct ctxt_priv *priv = sess->ctxt; 206 struct qbman_fle *fle, *ip_fle, *op_fle; 207 struct sec_flow_context *flc; 208 struct rte_mbuf *src_mbuf = sym_op->m_src; 209 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 210 int retval; 211 212 if (!dst_mbuf) 213 dst_mbuf = src_mbuf; 214 215 /* Save the shared descriptor */ 216 flc = &priv->flc_desc[0].flc; 217 218 /* we are using the first FLE entry to store Mbuf */ 219 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 220 if (retval) { 221 DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed"); 222 return -ENOMEM; 223 } 224 memset(fle, 0, FLE_POOL_BUF_SIZE); 225 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 226 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 227 228 op_fle = fle + 1; 229 ip_fle = fle + 2; 230 231 if (likely(bpid < MAX_BPID)) { 232 DPAA2_SET_FD_BPID(fd, bpid); 233 DPAA2_SET_FLE_BPID(op_fle, bpid); 234 DPAA2_SET_FLE_BPID(ip_fle, bpid); 235 } else { 236 DPAA2_SET_FD_IVP(fd); 237 DPAA2_SET_FLE_IVP(op_fle); 238 DPAA2_SET_FLE_IVP(ip_fle); 239 } 240 241 /* Configure FD as a FRAME LIST */ 242 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 243 DPAA2_SET_FD_COMPOUND_FMT(fd); 244 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 245 246 /* Configure Output FLE with dst mbuf data */ 247 DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf)); 248 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 249 250 /* Configure Input FLE with src mbuf data */ 251 DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf)); 252 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 253 254 DPAA2_SET_FD_LEN(fd, ip_fle->length); 255 DPAA2_SET_FLE_FIN(ip_fle); 256 257 /* In case of PDCP, per packet HFN is stored in 258 * mbuf priv after sym_op. 259 */ 260 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 261 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 262 sess->pdcp.hfn_ovd_offset); 263 /* enable HFN override */ 264 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 265 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 266 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 267 } 268 269 return 0; 270 271 } 272 273 static inline int 274 build_proto_fd(dpaa2_sec_session *sess, 275 struct rte_crypto_op *op, 276 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 277 { 278 struct rte_crypto_sym_op *sym_op = op->sym; 279 if (sym_op->m_dst) 280 return build_proto_compound_fd(sess, op, fd, bpid, qp); 281 282 struct ctxt_priv *priv = sess->ctxt; 283 struct sec_flow_context *flc; 284 struct rte_mbuf *mbuf = sym_op->m_src; 285 286 if (likely(bpid < MAX_BPID)) 287 DPAA2_SET_FD_BPID(fd, bpid); 288 else 289 DPAA2_SET_FD_IVP(fd); 290 291 /* Save the shared descriptor */ 292 flc = &priv->flc_desc[0].flc; 293 294 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 295 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 296 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 297 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 298 299 /* save physical address of mbuf */ 300 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 301 mbuf->buf_iova = (size_t)op; 302 303 return 0; 304 } 305 306 static inline int 307 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 308 struct rte_crypto_op *op, 309 struct qbman_fd *fd, __rte_unused uint16_t bpid) 310 { 311 struct rte_crypto_sym_op *sym_op = op->sym; 312 struct ctxt_priv *priv = sess->ctxt; 313 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 314 struct sec_flow_context *flc; 315 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 316 int icv_len = sess->digest_length; 317 uint8_t *old_icv; 318 struct rte_mbuf *mbuf; 319 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 320 sess->iv.offset); 321 322 if (sym_op->m_dst) 323 mbuf = sym_op->m_dst; 324 else 325 mbuf = sym_op->m_src; 326 327 /* first FLE entry used to store mbuf and session ctxt */ 328 fle = (struct qbman_fle *)rte_malloc(NULL, 329 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 330 RTE_CACHE_LINE_SIZE); 331 if (unlikely(!fle)) { 332 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 333 return -ENOMEM; 334 } 335 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 336 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 337 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 338 339 op_fle = fle + 1; 340 ip_fle = fle + 2; 341 sge = fle + 3; 342 343 /* Save the shared descriptor */ 344 flc = &priv->flc_desc[0].flc; 345 346 /* Configure FD as a FRAME LIST */ 347 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 348 DPAA2_SET_FD_COMPOUND_FMT(fd); 349 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 350 351 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 352 "iv-len=%d data_off: 0x%x\n", 353 sym_op->aead.data.offset, 354 sym_op->aead.data.length, 355 sess->digest_length, 356 sess->iv.length, 357 sym_op->m_src->data_off); 358 359 /* Configure Output FLE with Scatter/Gather Entry */ 360 DPAA2_SET_FLE_SG_EXT(op_fle); 361 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 362 363 if (auth_only_len) 364 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 365 366 op_fle->length = (sess->dir == DIR_ENC) ? 367 (sym_op->aead.data.length + icv_len) : 368 sym_op->aead.data.length; 369 370 /* Configure Output SGE for Encap/Decap */ 371 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); 372 sge->length = mbuf->data_len - sym_op->aead.data.offset; 373 374 mbuf = mbuf->next; 375 /* o/p segs */ 376 while (mbuf) { 377 sge++; 378 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 379 sge->length = mbuf->data_len; 380 mbuf = mbuf->next; 381 } 382 sge->length -= icv_len; 383 384 if (sess->dir == DIR_ENC) { 385 sge++; 386 DPAA2_SET_FLE_ADDR(sge, 387 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 388 sge->length = icv_len; 389 } 390 DPAA2_SET_FLE_FIN(sge); 391 392 sge++; 393 mbuf = sym_op->m_src; 394 395 /* Configure Input FLE with Scatter/Gather Entry */ 396 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 397 DPAA2_SET_FLE_SG_EXT(ip_fle); 398 DPAA2_SET_FLE_FIN(ip_fle); 399 ip_fle->length = (sess->dir == DIR_ENC) ? 400 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 401 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 402 icv_len); 403 404 /* Configure Input SGE for Encap/Decap */ 405 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 406 sge->length = sess->iv.length; 407 408 sge++; 409 if (auth_only_len) { 410 DPAA2_SET_FLE_ADDR(sge, 411 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 412 sge->length = auth_only_len; 413 sge++; 414 } 415 416 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); 417 sge->length = mbuf->data_len - sym_op->aead.data.offset; 418 419 mbuf = mbuf->next; 420 /* i/p segs */ 421 while (mbuf) { 422 sge++; 423 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 424 sge->length = mbuf->data_len; 425 mbuf = mbuf->next; 426 } 427 428 if (sess->dir == DIR_DEC) { 429 sge++; 430 old_icv = (uint8_t *)(sge + 1); 431 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 432 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 433 sge->length = icv_len; 434 } 435 436 DPAA2_SET_FLE_FIN(sge); 437 if (auth_only_len) { 438 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 439 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 440 } 441 DPAA2_SET_FD_LEN(fd, ip_fle->length); 442 443 return 0; 444 } 445 446 static inline int 447 build_authenc_gcm_fd(dpaa2_sec_session *sess, 448 struct rte_crypto_op *op, 449 struct qbman_fd *fd, uint16_t bpid, 450 struct dpaa2_sec_qp *qp) 451 { 452 struct rte_crypto_sym_op *sym_op = op->sym; 453 struct ctxt_priv *priv = sess->ctxt; 454 struct qbman_fle *fle, *sge; 455 struct sec_flow_context *flc; 456 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 457 int icv_len = sess->digest_length, retval; 458 uint8_t *old_icv; 459 struct rte_mbuf *dst; 460 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 461 sess->iv.offset); 462 463 if (sym_op->m_dst) 464 dst = sym_op->m_dst; 465 else 466 dst = sym_op->m_src; 467 468 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 469 * Currently we donot know which FLE has the mbuf stored. 470 * So while retreiving we can go back 1 FLE from the FD -ADDR 471 * to get the MBUF Addr from the previous FLE. 472 * We can have a better approach to use the inline Mbuf 473 */ 474 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 475 if (retval) { 476 DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool"); 477 return -ENOMEM; 478 } 479 memset(fle, 0, FLE_POOL_BUF_SIZE); 480 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 481 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 482 fle = fle + 1; 483 sge = fle + 2; 484 if (likely(bpid < MAX_BPID)) { 485 DPAA2_SET_FD_BPID(fd, bpid); 486 DPAA2_SET_FLE_BPID(fle, bpid); 487 DPAA2_SET_FLE_BPID(fle + 1, bpid); 488 DPAA2_SET_FLE_BPID(sge, bpid); 489 DPAA2_SET_FLE_BPID(sge + 1, bpid); 490 DPAA2_SET_FLE_BPID(sge + 2, bpid); 491 DPAA2_SET_FLE_BPID(sge + 3, bpid); 492 } else { 493 DPAA2_SET_FD_IVP(fd); 494 DPAA2_SET_FLE_IVP(fle); 495 DPAA2_SET_FLE_IVP((fle + 1)); 496 DPAA2_SET_FLE_IVP(sge); 497 DPAA2_SET_FLE_IVP((sge + 1)); 498 DPAA2_SET_FLE_IVP((sge + 2)); 499 DPAA2_SET_FLE_IVP((sge + 3)); 500 } 501 502 /* Save the shared descriptor */ 503 flc = &priv->flc_desc[0].flc; 504 /* Configure FD as a FRAME LIST */ 505 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 506 DPAA2_SET_FD_COMPOUND_FMT(fd); 507 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 508 509 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 510 "iv-len=%d data_off: 0x%x\n", 511 sym_op->aead.data.offset, 512 sym_op->aead.data.length, 513 sess->digest_length, 514 sess->iv.length, 515 sym_op->m_src->data_off); 516 517 /* Configure Output FLE with Scatter/Gather Entry */ 518 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 519 if (auth_only_len) 520 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 521 fle->length = (sess->dir == DIR_ENC) ? 522 (sym_op->aead.data.length + icv_len) : 523 sym_op->aead.data.length; 524 525 DPAA2_SET_FLE_SG_EXT(fle); 526 527 /* Configure Output SGE for Encap/Decap */ 528 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset); 529 sge->length = sym_op->aead.data.length; 530 531 if (sess->dir == DIR_ENC) { 532 sge++; 533 DPAA2_SET_FLE_ADDR(sge, 534 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 535 sge->length = sess->digest_length; 536 } 537 DPAA2_SET_FLE_FIN(sge); 538 539 sge++; 540 fle++; 541 542 /* Configure Input FLE with Scatter/Gather Entry */ 543 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 544 DPAA2_SET_FLE_SG_EXT(fle); 545 DPAA2_SET_FLE_FIN(fle); 546 fle->length = (sess->dir == DIR_ENC) ? 547 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 548 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 549 sess->digest_length); 550 551 /* Configure Input SGE for Encap/Decap */ 552 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 553 sge->length = sess->iv.length; 554 sge++; 555 if (auth_only_len) { 556 DPAA2_SET_FLE_ADDR(sge, 557 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 558 sge->length = auth_only_len; 559 DPAA2_SET_FLE_BPID(sge, bpid); 560 sge++; 561 } 562 563 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset); 564 sge->length = sym_op->aead.data.length; 565 if (sess->dir == DIR_DEC) { 566 sge++; 567 old_icv = (uint8_t *)(sge + 1); 568 memcpy(old_icv, sym_op->aead.digest.data, 569 sess->digest_length); 570 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 571 sge->length = sess->digest_length; 572 } 573 DPAA2_SET_FLE_FIN(sge); 574 575 if (auth_only_len) { 576 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 577 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 578 } 579 580 DPAA2_SET_FD_LEN(fd, fle->length); 581 return 0; 582 } 583 584 static inline int 585 build_authenc_sg_fd(dpaa2_sec_session *sess, 586 struct rte_crypto_op *op, 587 struct qbman_fd *fd, __rte_unused uint16_t bpid) 588 { 589 struct rte_crypto_sym_op *sym_op = op->sym; 590 struct ctxt_priv *priv = sess->ctxt; 591 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 592 struct sec_flow_context *flc; 593 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 594 sym_op->auth.data.offset; 595 uint16_t auth_tail_len = sym_op->auth.data.length - 596 sym_op->cipher.data.length - auth_hdr_len; 597 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 598 int icv_len = sess->digest_length; 599 uint8_t *old_icv; 600 struct rte_mbuf *mbuf; 601 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 602 sess->iv.offset); 603 604 if (sym_op->m_dst) 605 mbuf = sym_op->m_dst; 606 else 607 mbuf = sym_op->m_src; 608 609 /* first FLE entry used to store mbuf and session ctxt */ 610 fle = (struct qbman_fle *)rte_malloc(NULL, 611 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 612 RTE_CACHE_LINE_SIZE); 613 if (unlikely(!fle)) { 614 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 615 return -ENOMEM; 616 } 617 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 618 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 619 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 620 621 op_fle = fle + 1; 622 ip_fle = fle + 2; 623 sge = fle + 3; 624 625 /* Save the shared descriptor */ 626 flc = &priv->flc_desc[0].flc; 627 628 /* Configure FD as a FRAME LIST */ 629 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 630 DPAA2_SET_FD_COMPOUND_FMT(fd); 631 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 632 633 DPAA2_SEC_DP_DEBUG( 634 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 635 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 636 sym_op->auth.data.offset, 637 sym_op->auth.data.length, 638 sess->digest_length, 639 sym_op->cipher.data.offset, 640 sym_op->cipher.data.length, 641 sess->iv.length, 642 sym_op->m_src->data_off); 643 644 /* Configure Output FLE with Scatter/Gather Entry */ 645 DPAA2_SET_FLE_SG_EXT(op_fle); 646 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 647 648 if (auth_only_len) 649 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 650 651 op_fle->length = (sess->dir == DIR_ENC) ? 652 (sym_op->cipher.data.length + icv_len) : 653 sym_op->cipher.data.length; 654 655 /* Configure Output SGE for Encap/Decap */ 656 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); 657 sge->length = mbuf->data_len - sym_op->auth.data.offset; 658 659 mbuf = mbuf->next; 660 /* o/p segs */ 661 while (mbuf) { 662 sge++; 663 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 664 sge->length = mbuf->data_len; 665 mbuf = mbuf->next; 666 } 667 sge->length -= icv_len; 668 669 if (sess->dir == DIR_ENC) { 670 sge++; 671 DPAA2_SET_FLE_ADDR(sge, 672 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 673 sge->length = icv_len; 674 } 675 DPAA2_SET_FLE_FIN(sge); 676 677 sge++; 678 mbuf = sym_op->m_src; 679 680 /* Configure Input FLE with Scatter/Gather Entry */ 681 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 682 DPAA2_SET_FLE_SG_EXT(ip_fle); 683 DPAA2_SET_FLE_FIN(ip_fle); 684 ip_fle->length = (sess->dir == DIR_ENC) ? 685 (sym_op->auth.data.length + sess->iv.length) : 686 (sym_op->auth.data.length + sess->iv.length + 687 icv_len); 688 689 /* Configure Input SGE for Encap/Decap */ 690 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 691 sge->length = sess->iv.length; 692 693 sge++; 694 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); 695 sge->length = mbuf->data_len - sym_op->auth.data.offset; 696 697 mbuf = mbuf->next; 698 /* i/p segs */ 699 while (mbuf) { 700 sge++; 701 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 702 sge->length = mbuf->data_len; 703 mbuf = mbuf->next; 704 } 705 sge->length -= icv_len; 706 707 if (sess->dir == DIR_DEC) { 708 sge++; 709 old_icv = (uint8_t *)(sge + 1); 710 memcpy(old_icv, sym_op->auth.digest.data, 711 icv_len); 712 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 713 sge->length = icv_len; 714 } 715 716 DPAA2_SET_FLE_FIN(sge); 717 if (auth_only_len) { 718 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 719 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 720 } 721 DPAA2_SET_FD_LEN(fd, ip_fle->length); 722 723 return 0; 724 } 725 726 static inline int 727 build_authenc_fd(dpaa2_sec_session *sess, 728 struct rte_crypto_op *op, 729 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 730 { 731 struct rte_crypto_sym_op *sym_op = op->sym; 732 struct ctxt_priv *priv = sess->ctxt; 733 struct qbman_fle *fle, *sge; 734 struct sec_flow_context *flc; 735 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 736 sym_op->auth.data.offset; 737 uint16_t auth_tail_len = sym_op->auth.data.length - 738 sym_op->cipher.data.length - auth_hdr_len; 739 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 740 741 int icv_len = sess->digest_length, retval; 742 uint8_t *old_icv; 743 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 744 sess->iv.offset); 745 struct rte_mbuf *dst; 746 747 if (sym_op->m_dst) 748 dst = sym_op->m_dst; 749 else 750 dst = sym_op->m_src; 751 752 /* we are using the first FLE entry to store Mbuf. 753 * Currently we donot know which FLE has the mbuf stored. 754 * So while retreiving we can go back 1 FLE from the FD -ADDR 755 * to get the MBUF Addr from the previous FLE. 756 * We can have a better approach to use the inline Mbuf 757 */ 758 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 759 if (retval) { 760 DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool"); 761 return -ENOMEM; 762 } 763 memset(fle, 0, FLE_POOL_BUF_SIZE); 764 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 765 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 766 fle = fle + 1; 767 sge = fle + 2; 768 if (likely(bpid < MAX_BPID)) { 769 DPAA2_SET_FD_BPID(fd, bpid); 770 DPAA2_SET_FLE_BPID(fle, bpid); 771 DPAA2_SET_FLE_BPID(fle + 1, bpid); 772 DPAA2_SET_FLE_BPID(sge, bpid); 773 DPAA2_SET_FLE_BPID(sge + 1, bpid); 774 DPAA2_SET_FLE_BPID(sge + 2, bpid); 775 DPAA2_SET_FLE_BPID(sge + 3, bpid); 776 } else { 777 DPAA2_SET_FD_IVP(fd); 778 DPAA2_SET_FLE_IVP(fle); 779 DPAA2_SET_FLE_IVP((fle + 1)); 780 DPAA2_SET_FLE_IVP(sge); 781 DPAA2_SET_FLE_IVP((sge + 1)); 782 DPAA2_SET_FLE_IVP((sge + 2)); 783 DPAA2_SET_FLE_IVP((sge + 3)); 784 } 785 786 /* Save the shared descriptor */ 787 flc = &priv->flc_desc[0].flc; 788 /* Configure FD as a FRAME LIST */ 789 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 790 DPAA2_SET_FD_COMPOUND_FMT(fd); 791 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 792 793 DPAA2_SEC_DP_DEBUG( 794 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 795 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 796 sym_op->auth.data.offset, 797 sym_op->auth.data.length, 798 sess->digest_length, 799 sym_op->cipher.data.offset, 800 sym_op->cipher.data.length, 801 sess->iv.length, 802 sym_op->m_src->data_off); 803 804 /* Configure Output FLE with Scatter/Gather Entry */ 805 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 806 if (auth_only_len) 807 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 808 fle->length = (sess->dir == DIR_ENC) ? 809 (sym_op->cipher.data.length + icv_len) : 810 sym_op->cipher.data.length; 811 812 DPAA2_SET_FLE_SG_EXT(fle); 813 814 /* Configure Output SGE for Encap/Decap */ 815 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset); 816 sge->length = sym_op->cipher.data.length; 817 818 if (sess->dir == DIR_ENC) { 819 sge++; 820 DPAA2_SET_FLE_ADDR(sge, 821 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 822 sge->length = sess->digest_length; 823 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 824 sess->iv.length)); 825 } 826 DPAA2_SET_FLE_FIN(sge); 827 828 sge++; 829 fle++; 830 831 /* Configure Input FLE with Scatter/Gather Entry */ 832 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 833 DPAA2_SET_FLE_SG_EXT(fle); 834 DPAA2_SET_FLE_FIN(fle); 835 fle->length = (sess->dir == DIR_ENC) ? 836 (sym_op->auth.data.length + sess->iv.length) : 837 (sym_op->auth.data.length + sess->iv.length + 838 sess->digest_length); 839 840 /* Configure Input SGE for Encap/Decap */ 841 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 842 sge->length = sess->iv.length; 843 sge++; 844 845 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset); 846 sge->length = sym_op->auth.data.length; 847 if (sess->dir == DIR_DEC) { 848 sge++; 849 old_icv = (uint8_t *)(sge + 1); 850 memcpy(old_icv, sym_op->auth.digest.data, 851 sess->digest_length); 852 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 853 sge->length = sess->digest_length; 854 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 855 sess->digest_length + 856 sess->iv.length)); 857 } 858 DPAA2_SET_FLE_FIN(sge); 859 if (auth_only_len) { 860 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 861 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 862 } 863 return 0; 864 } 865 866 static inline int build_auth_sg_fd( 867 dpaa2_sec_session *sess, 868 struct rte_crypto_op *op, 869 struct qbman_fd *fd, 870 __rte_unused uint16_t bpid) 871 { 872 struct rte_crypto_sym_op *sym_op = op->sym; 873 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 874 struct sec_flow_context *flc; 875 struct ctxt_priv *priv = sess->ctxt; 876 int data_len, data_offset; 877 uint8_t *old_digest; 878 struct rte_mbuf *mbuf; 879 880 data_len = sym_op->auth.data.length; 881 data_offset = sym_op->auth.data.offset; 882 883 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 884 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 885 if ((data_len & 7) || (data_offset & 7)) { 886 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 887 return -ENOTSUP; 888 } 889 890 data_len = data_len >> 3; 891 data_offset = data_offset >> 3; 892 } 893 894 mbuf = sym_op->m_src; 895 fle = (struct qbman_fle *)rte_malloc(NULL, 896 FLE_SG_MEM_SIZE(mbuf->nb_segs), 897 RTE_CACHE_LINE_SIZE); 898 if (unlikely(!fle)) { 899 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 900 return -ENOMEM; 901 } 902 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 903 /* first FLE entry used to store mbuf and session ctxt */ 904 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 905 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 906 op_fle = fle + 1; 907 ip_fle = fle + 2; 908 sge = fle + 3; 909 910 flc = &priv->flc_desc[DESC_INITFINAL].flc; 911 /* sg FD */ 912 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 913 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 914 DPAA2_SET_FD_COMPOUND_FMT(fd); 915 916 /* o/p fle */ 917 DPAA2_SET_FLE_ADDR(op_fle, 918 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 919 op_fle->length = sess->digest_length; 920 921 /* i/p fle */ 922 DPAA2_SET_FLE_SG_EXT(ip_fle); 923 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 924 ip_fle->length = data_len; 925 926 if (sess->iv.length) { 927 uint8_t *iv_ptr; 928 929 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 930 sess->iv.offset); 931 932 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 933 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 934 sge->length = 12; 935 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 936 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 937 sge->length = 8; 938 } else { 939 sge->length = sess->iv.length; 940 } 941 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 942 ip_fle->length += sge->length; 943 sge++; 944 } 945 /* i/p 1st seg */ 946 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 947 948 if (data_len <= (mbuf->data_len - data_offset)) { 949 sge->length = data_len; 950 data_len = 0; 951 } else { 952 sge->length = mbuf->data_len - data_offset; 953 954 /* remaining i/p segs */ 955 while ((data_len = data_len - sge->length) && 956 (mbuf = mbuf->next)) { 957 sge++; 958 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 959 if (data_len > mbuf->data_len) 960 sge->length = mbuf->data_len; 961 else 962 sge->length = data_len; 963 } 964 } 965 966 if (sess->dir == DIR_DEC) { 967 /* Digest verification case */ 968 sge++; 969 old_digest = (uint8_t *)(sge + 1); 970 rte_memcpy(old_digest, sym_op->auth.digest.data, 971 sess->digest_length); 972 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 973 sge->length = sess->digest_length; 974 ip_fle->length += sess->digest_length; 975 } 976 DPAA2_SET_FLE_FIN(sge); 977 DPAA2_SET_FLE_FIN(ip_fle); 978 DPAA2_SET_FD_LEN(fd, ip_fle->length); 979 980 return 0; 981 } 982 983 static inline int 984 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 985 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 986 { 987 struct rte_crypto_sym_op *sym_op = op->sym; 988 struct qbman_fle *fle, *sge; 989 struct sec_flow_context *flc; 990 struct ctxt_priv *priv = sess->ctxt; 991 int data_len, data_offset; 992 uint8_t *old_digest; 993 int retval; 994 995 data_len = sym_op->auth.data.length; 996 data_offset = sym_op->auth.data.offset; 997 998 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 999 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1000 if ((data_len & 7) || (data_offset & 7)) { 1001 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 1002 return -ENOTSUP; 1003 } 1004 1005 data_len = data_len >> 3; 1006 data_offset = data_offset >> 3; 1007 } 1008 1009 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 1010 if (retval) { 1011 DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool"); 1012 return -ENOMEM; 1013 } 1014 memset(fle, 0, FLE_POOL_BUF_SIZE); 1015 /* TODO we are using the first FLE entry to store Mbuf. 1016 * Currently we donot know which FLE has the mbuf stored. 1017 * So while retreiving we can go back 1 FLE from the FD -ADDR 1018 * to get the MBUF Addr from the previous FLE. 1019 * We can have a better approach to use the inline Mbuf 1020 */ 1021 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1022 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1023 fle = fle + 1; 1024 sge = fle + 2; 1025 1026 if (likely(bpid < MAX_BPID)) { 1027 DPAA2_SET_FD_BPID(fd, bpid); 1028 DPAA2_SET_FLE_BPID(fle, bpid); 1029 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1030 DPAA2_SET_FLE_BPID(sge, bpid); 1031 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1032 } else { 1033 DPAA2_SET_FD_IVP(fd); 1034 DPAA2_SET_FLE_IVP(fle); 1035 DPAA2_SET_FLE_IVP((fle + 1)); 1036 DPAA2_SET_FLE_IVP(sge); 1037 DPAA2_SET_FLE_IVP((sge + 1)); 1038 } 1039 1040 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1041 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1042 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1043 DPAA2_SET_FD_COMPOUND_FMT(fd); 1044 1045 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1046 fle->length = sess->digest_length; 1047 fle++; 1048 1049 /* Setting input FLE */ 1050 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1051 DPAA2_SET_FLE_SG_EXT(fle); 1052 fle->length = data_len; 1053 1054 if (sess->iv.length) { 1055 uint8_t *iv_ptr; 1056 1057 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1058 sess->iv.offset); 1059 1060 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1061 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1062 sge->length = 12; 1063 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1064 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1065 sge->length = 8; 1066 } else { 1067 sge->length = sess->iv.length; 1068 } 1069 1070 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1071 fle->length = fle->length + sge->length; 1072 sge++; 1073 } 1074 1075 /* Setting data to authenticate */ 1076 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); 1077 sge->length = data_len; 1078 1079 if (sess->dir == DIR_DEC) { 1080 sge++; 1081 old_digest = (uint8_t *)(sge + 1); 1082 rte_memcpy(old_digest, sym_op->auth.digest.data, 1083 sess->digest_length); 1084 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1085 sge->length = sess->digest_length; 1086 fle->length = fle->length + sess->digest_length; 1087 } 1088 1089 DPAA2_SET_FLE_FIN(sge); 1090 DPAA2_SET_FLE_FIN(fle); 1091 DPAA2_SET_FD_LEN(fd, fle->length); 1092 1093 return 0; 1094 } 1095 1096 static int 1097 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1098 struct qbman_fd *fd, __rte_unused uint16_t bpid) 1099 { 1100 struct rte_crypto_sym_op *sym_op = op->sym; 1101 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1102 int data_len, data_offset; 1103 struct sec_flow_context *flc; 1104 struct ctxt_priv *priv = sess->ctxt; 1105 struct rte_mbuf *mbuf; 1106 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1107 sess->iv.offset); 1108 1109 data_len = sym_op->cipher.data.length; 1110 data_offset = sym_op->cipher.data.offset; 1111 1112 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1113 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1114 if ((data_len & 7) || (data_offset & 7)) { 1115 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1116 return -ENOTSUP; 1117 } 1118 1119 data_len = data_len >> 3; 1120 data_offset = data_offset >> 3; 1121 } 1122 1123 if (sym_op->m_dst) 1124 mbuf = sym_op->m_dst; 1125 else 1126 mbuf = sym_op->m_src; 1127 1128 /* first FLE entry used to store mbuf and session ctxt */ 1129 fle = (struct qbman_fle *)rte_malloc(NULL, 1130 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1131 RTE_CACHE_LINE_SIZE); 1132 if (!fle) { 1133 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1134 return -ENOMEM; 1135 } 1136 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1137 /* first FLE entry used to store mbuf and session ctxt */ 1138 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1139 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1140 1141 op_fle = fle + 1; 1142 ip_fle = fle + 2; 1143 sge = fle + 3; 1144 1145 flc = &priv->flc_desc[0].flc; 1146 1147 DPAA2_SEC_DP_DEBUG( 1148 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1149 " data_off: 0x%x", 1150 data_offset, 1151 data_len, 1152 sess->iv.length, 1153 sym_op->m_src->data_off); 1154 1155 /* o/p fle */ 1156 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1157 op_fle->length = data_len; 1158 DPAA2_SET_FLE_SG_EXT(op_fle); 1159 1160 /* o/p 1st seg */ 1161 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 1162 sge->length = mbuf->data_len - data_offset; 1163 1164 mbuf = mbuf->next; 1165 /* o/p segs */ 1166 while (mbuf) { 1167 sge++; 1168 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 1169 sge->length = mbuf->data_len; 1170 mbuf = mbuf->next; 1171 } 1172 DPAA2_SET_FLE_FIN(sge); 1173 1174 DPAA2_SEC_DP_DEBUG( 1175 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d", 1176 flc, fle, fle->addr_hi, fle->addr_lo, 1177 fle->length); 1178 1179 /* i/p fle */ 1180 mbuf = sym_op->m_src; 1181 sge++; 1182 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1183 ip_fle->length = sess->iv.length + data_len; 1184 DPAA2_SET_FLE_SG_EXT(ip_fle); 1185 1186 /* i/p IV */ 1187 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1188 sge->length = sess->iv.length; 1189 1190 sge++; 1191 1192 /* i/p 1st seg */ 1193 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 1194 sge->length = mbuf->data_len - data_offset; 1195 1196 mbuf = mbuf->next; 1197 /* i/p segs */ 1198 while (mbuf) { 1199 sge++; 1200 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 1201 sge->length = mbuf->data_len; 1202 mbuf = mbuf->next; 1203 } 1204 DPAA2_SET_FLE_FIN(sge); 1205 DPAA2_SET_FLE_FIN(ip_fle); 1206 1207 /* sg fd */ 1208 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1209 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1210 DPAA2_SET_FD_COMPOUND_FMT(fd); 1211 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1212 1213 DPAA2_SEC_DP_DEBUG( 1214 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1215 " off =%d, len =%d", 1216 DPAA2_GET_FD_ADDR(fd), 1217 DPAA2_GET_FD_BPID(fd), 1218 rte_dpaa2_bpid_info[bpid].meta_data_size, 1219 DPAA2_GET_FD_OFFSET(fd), 1220 DPAA2_GET_FD_LEN(fd)); 1221 return 0; 1222 } 1223 1224 static int 1225 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1226 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1227 { 1228 struct rte_crypto_sym_op *sym_op = op->sym; 1229 struct qbman_fle *fle, *sge; 1230 int retval, data_len, data_offset; 1231 struct sec_flow_context *flc; 1232 struct ctxt_priv *priv = sess->ctxt; 1233 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1234 sess->iv.offset); 1235 struct rte_mbuf *dst; 1236 1237 data_len = sym_op->cipher.data.length; 1238 data_offset = sym_op->cipher.data.offset; 1239 1240 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1241 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1242 if ((data_len & 7) || (data_offset & 7)) { 1243 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1244 return -ENOTSUP; 1245 } 1246 1247 data_len = data_len >> 3; 1248 data_offset = data_offset >> 3; 1249 } 1250 1251 if (sym_op->m_dst) 1252 dst = sym_op->m_dst; 1253 else 1254 dst = sym_op->m_src; 1255 1256 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 1257 if (retval) { 1258 DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool"); 1259 return -ENOMEM; 1260 } 1261 memset(fle, 0, FLE_POOL_BUF_SIZE); 1262 /* TODO we are using the first FLE entry to store Mbuf. 1263 * Currently we donot know which FLE has the mbuf stored. 1264 * So while retreiving we can go back 1 FLE from the FD -ADDR 1265 * to get the MBUF Addr from the previous FLE. 1266 * We can have a better approach to use the inline Mbuf 1267 */ 1268 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1269 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1270 fle = fle + 1; 1271 sge = fle + 2; 1272 1273 if (likely(bpid < MAX_BPID)) { 1274 DPAA2_SET_FD_BPID(fd, bpid); 1275 DPAA2_SET_FLE_BPID(fle, bpid); 1276 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1277 DPAA2_SET_FLE_BPID(sge, bpid); 1278 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1279 } else { 1280 DPAA2_SET_FD_IVP(fd); 1281 DPAA2_SET_FLE_IVP(fle); 1282 DPAA2_SET_FLE_IVP((fle + 1)); 1283 DPAA2_SET_FLE_IVP(sge); 1284 DPAA2_SET_FLE_IVP((sge + 1)); 1285 } 1286 1287 flc = &priv->flc_desc[0].flc; 1288 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1289 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1290 DPAA2_SET_FD_COMPOUND_FMT(fd); 1291 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1292 1293 DPAA2_SEC_DP_DEBUG( 1294 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1295 " data_off: 0x%x", 1296 data_offset, 1297 data_len, 1298 sess->iv.length, 1299 sym_op->m_src->data_off); 1300 1301 DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset); 1302 1303 fle->length = data_len + sess->iv.length; 1304 1305 DPAA2_SEC_DP_DEBUG( 1306 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", 1307 flc, fle, fle->addr_hi, fle->addr_lo, 1308 fle->length); 1309 1310 fle++; 1311 1312 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1313 fle->length = data_len + sess->iv.length; 1314 1315 DPAA2_SET_FLE_SG_EXT(fle); 1316 1317 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1318 sge->length = sess->iv.length; 1319 1320 sge++; 1321 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); 1322 1323 sge->length = data_len; 1324 DPAA2_SET_FLE_FIN(sge); 1325 DPAA2_SET_FLE_FIN(fle); 1326 1327 DPAA2_SEC_DP_DEBUG( 1328 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1329 " off =%d, len =%d", 1330 DPAA2_GET_FD_ADDR(fd), 1331 DPAA2_GET_FD_BPID(fd), 1332 rte_dpaa2_bpid_info[bpid].meta_data_size, 1333 DPAA2_GET_FD_OFFSET(fd), 1334 DPAA2_GET_FD_LEN(fd)); 1335 1336 return 0; 1337 } 1338 1339 static inline int 1340 build_sec_fd(struct rte_crypto_op *op, 1341 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1342 { 1343 int ret = -1; 1344 dpaa2_sec_session *sess; 1345 1346 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { 1347 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1348 } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1349 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1350 } else { 1351 DPAA2_SEC_DP_ERR("Session type invalid"); 1352 return -ENOTSUP; 1353 } 1354 1355 if (!sess) { 1356 DPAA2_SEC_DP_ERR("Session not available"); 1357 return -EINVAL; 1358 } 1359 1360 /* Any of the buffer is segmented*/ 1361 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1362 ((op->sym->m_dst != NULL) && 1363 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1364 switch (sess->ctxt_type) { 1365 case DPAA2_SEC_CIPHER: 1366 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1367 break; 1368 case DPAA2_SEC_AUTH: 1369 ret = build_auth_sg_fd(sess, op, fd, bpid); 1370 break; 1371 case DPAA2_SEC_AEAD: 1372 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1373 break; 1374 case DPAA2_SEC_CIPHER_HASH: 1375 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1376 break; 1377 case DPAA2_SEC_IPSEC: 1378 case DPAA2_SEC_PDCP: 1379 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1380 break; 1381 default: 1382 DPAA2_SEC_ERR("error: Unsupported session %d", 1383 sess->ctxt_type); 1384 ret = -ENOTSUP; 1385 } 1386 } else { 1387 switch (sess->ctxt_type) { 1388 case DPAA2_SEC_CIPHER: 1389 ret = build_cipher_fd(sess, op, fd, bpid, qp); 1390 break; 1391 case DPAA2_SEC_AUTH: 1392 ret = build_auth_fd(sess, op, fd, bpid, qp); 1393 break; 1394 case DPAA2_SEC_AEAD: 1395 ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp); 1396 break; 1397 case DPAA2_SEC_CIPHER_HASH: 1398 ret = build_authenc_fd(sess, op, fd, bpid, qp); 1399 break; 1400 case DPAA2_SEC_IPSEC: 1401 ret = build_proto_fd(sess, op, fd, bpid, qp); 1402 break; 1403 case DPAA2_SEC_PDCP: 1404 ret = build_proto_compound_fd(sess, op, fd, bpid, qp); 1405 break; 1406 default: 1407 DPAA2_SEC_ERR("error: Unsupported session%d", 1408 sess->ctxt_type); 1409 ret = -ENOTSUP; 1410 } 1411 } 1412 return ret; 1413 } 1414 1415 static uint16_t 1416 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1417 uint16_t nb_ops) 1418 { 1419 /* Function to transmit the frames to given device and VQ*/ 1420 uint32_t loop; 1421 int32_t ret; 1422 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1423 uint32_t frames_to_send, retry_count; 1424 struct qbman_eq_desc eqdesc; 1425 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1426 struct qbman_swp *swp; 1427 uint16_t num_tx = 0; 1428 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1429 /*todo - need to support multiple buffer pools */ 1430 uint16_t bpid; 1431 struct rte_mempool *mb_pool; 1432 1433 if (unlikely(nb_ops == 0)) 1434 return 0; 1435 1436 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1437 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1438 return 0; 1439 } 1440 /*Prepare enqueue descriptor*/ 1441 qbman_eq_desc_clear(&eqdesc); 1442 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1443 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1444 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1445 1446 if (!DPAA2_PER_LCORE_DPIO) { 1447 ret = dpaa2_affine_qbman_swp(); 1448 if (ret) { 1449 DPAA2_SEC_ERR( 1450 "Failed to allocate IO portal, tid: %d", 1451 rte_gettid()); 1452 return 0; 1453 } 1454 } 1455 swp = DPAA2_PER_LCORE_PORTAL; 1456 1457 while (nb_ops) { 1458 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1459 dpaa2_eqcr_size : nb_ops; 1460 1461 for (loop = 0; loop < frames_to_send; loop++) { 1462 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1463 if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) { 1464 DPAA2_PER_LCORE_DQRR_SIZE--; 1465 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << 1466 *dpaa2_seqn((*ops)->sym->m_src) & 1467 QBMAN_EQCR_DCA_IDXMASK); 1468 } 1469 flags[loop] = *dpaa2_seqn((*ops)->sym->m_src); 1470 *dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN; 1471 } 1472 1473 /*Clear the unused FD fields before sending*/ 1474 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1475 mb_pool = (*ops)->sym->m_src->pool; 1476 bpid = mempool_to_bpid(mb_pool); 1477 ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); 1478 if (ret) { 1479 DPAA2_SEC_DP_DEBUG("FD build failed"); 1480 goto skip_tx; 1481 } 1482 ops++; 1483 } 1484 1485 loop = 0; 1486 retry_count = 0; 1487 while (loop < frames_to_send) { 1488 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1489 &fd_arr[loop], 1490 &flags[loop], 1491 frames_to_send - loop); 1492 if (unlikely(ret < 0)) { 1493 retry_count++; 1494 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1495 num_tx += loop; 1496 nb_ops -= loop; 1497 DPAA2_SEC_DP_DEBUG("Enqueue fail"); 1498 /* freeing the fle buffers */ 1499 while (loop < frames_to_send) { 1500 free_fle(&fd_arr[loop], 1501 dpaa2_qp); 1502 loop++; 1503 } 1504 goto skip_tx; 1505 } 1506 } else { 1507 loop += ret; 1508 retry_count = 0; 1509 } 1510 } 1511 1512 num_tx += loop; 1513 nb_ops -= loop; 1514 } 1515 skip_tx: 1516 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1517 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1518 return num_tx; 1519 } 1520 1521 static inline struct rte_crypto_op * 1522 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1523 { 1524 struct rte_crypto_op *op; 1525 uint16_t len = DPAA2_GET_FD_LEN(fd); 1526 int16_t diff = 0; 1527 dpaa2_sec_session *sess_priv __rte_unused; 1528 1529 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1530 DPAA2_SEC_ERR("error: non inline buffer"); 1531 return NULL; 1532 } 1533 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1534 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1535 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1536 1537 diff = len - mbuf->pkt_len; 1538 mbuf->pkt_len += diff; 1539 mbuf->data_len += diff; 1540 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1541 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1542 op->sym->aead.digest.phys_addr = 0L; 1543 1544 sess_priv = SECURITY_GET_SESS_PRIV(op->sym->session); 1545 if (sess_priv->dir == DIR_ENC) 1546 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1547 else 1548 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1549 1550 if (unlikely(fd->simple.frc)) { 1551 DPAA2_SEC_ERR("SEC returned Error - %x", 1552 fd->simple.frc); 1553 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1554 } else { 1555 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1556 } 1557 1558 return op; 1559 } 1560 1561 static inline struct rte_crypto_op * 1562 sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) 1563 { 1564 struct qbman_fle *fle; 1565 struct rte_crypto_op *op; 1566 struct rte_mbuf *dst, *src; 1567 1568 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1569 return sec_simple_fd_to_mbuf(fd); 1570 1571 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1572 1573 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x", 1574 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1575 1576 /* we are using the first FLE entry to store Mbuf. 1577 * Currently we donot know which FLE has the mbuf stored. 1578 * So while retreiving we can go back 1 FLE from the FD -ADDR 1579 * to get the MBUF Addr from the previous FLE. 1580 * We can have a better approach to use the inline Mbuf 1581 */ 1582 1583 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1584 1585 /* Prefeth op */ 1586 src = op->sym->m_src; 1587 rte_prefetch0(src); 1588 1589 if (op->sym->m_dst) { 1590 dst = op->sym->m_dst; 1591 rte_prefetch0(dst); 1592 } else 1593 dst = src; 1594 1595 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1596 uint16_t len = DPAA2_GET_FD_LEN(fd); 1597 dst->pkt_len = len; 1598 while (dst->next != NULL) { 1599 len -= dst->data_len; 1600 dst = dst->next; 1601 } 1602 dst->data_len = len; 1603 } 1604 1605 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1606 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d", 1607 (void *)dst, 1608 dst->buf_addr, 1609 DPAA2_GET_FD_ADDR(fd), 1610 DPAA2_GET_FD_BPID(fd), 1611 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1612 DPAA2_GET_FD_OFFSET(fd), 1613 DPAA2_GET_FD_LEN(fd)); 1614 1615 /* free the fle memory */ 1616 if (likely(rte_pktmbuf_is_contiguous(src))) { 1617 rte_mempool_put(qp->fle_pool, (void *)(fle-1)); 1618 } else 1619 rte_free((void *)(fle-1)); 1620 1621 return op; 1622 } 1623 1624 static void 1625 dpaa2_sec_dump(struct rte_crypto_op *op, FILE *f) 1626 { 1627 int i; 1628 dpaa2_sec_session *sess = NULL; 1629 struct ctxt_priv *priv; 1630 uint8_t bufsize; 1631 struct rte_crypto_sym_op *sym_op; 1632 1633 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1634 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1635 #ifdef RTE_LIB_SECURITY 1636 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1637 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1638 #endif 1639 1640 if (sess == NULL) 1641 goto mbuf_dump; 1642 1643 priv = (struct ctxt_priv *)sess->ctxt; 1644 fprintf(f, "\n****************************************\n" 1645 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n" 1646 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n" 1647 "\tCipher key len:\t%zd\n", sess->ctxt_type, 1648 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC", 1649 sess->cipher_alg, sess->auth_alg, sess->aead_alg, 1650 sess->cipher_key.length); 1651 rte_hexdump(f, "cipher key", sess->cipher_key.data, 1652 sess->cipher_key.length); 1653 rte_hexdump(f, "auth key", sess->auth_key.data, 1654 sess->auth_key.length); 1655 fprintf(f, "\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n" 1656 "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only" 1657 " len:\t%d\n\taead cipher text:\t%d\n", 1658 sess->auth_key.length, sess->iv.length, sess->iv.offset, 1659 sess->digest_length, sess->status, 1660 sess->ext_params.aead_ctxt.auth_only_len, 1661 sess->ext_params.aead_ctxt.auth_cipher_text); 1662 #ifdef RTE_LIB_SECURITY 1663 fprintf(f, "PDCP session params:\n" 1664 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" 1665 "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" 1666 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain, 1667 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd, 1668 sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn, 1669 sess->pdcp.hfn_threshold); 1670 1671 #endif 1672 bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl; 1673 fprintf(f, "Descriptor Dump:\n"); 1674 for (i = 0; i < bufsize; i++) 1675 fprintf(f, "\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]); 1676 1677 fprintf(f, "\n"); 1678 mbuf_dump: 1679 sym_op = op->sym; 1680 if (sym_op->m_src) { 1681 fprintf(f, "Source mbuf:\n"); 1682 rte_pktmbuf_dump(f, sym_op->m_src, sym_op->m_src->data_len); 1683 } 1684 if (sym_op->m_dst) { 1685 fprintf(f, "Destination mbuf:\n"); 1686 rte_pktmbuf_dump(f, sym_op->m_dst, sym_op->m_dst->data_len); 1687 } 1688 1689 fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n" 1690 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n" 1691 , sym_op->session, 1692 sym_op->cipher.data.offset, sym_op->cipher.data.length, 1693 sym_op->auth.data.offset, sym_op->auth.data.length, 1694 sym_op->aead.data.offset, sym_op->aead.data.length); 1695 fprintf(f, "\n"); 1696 1697 } 1698 1699 static void 1700 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci, 1701 struct dpaa2_queue *dpaa2_q) 1702 { 1703 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 1704 struct rte_crypto_op *op; 1705 struct qbman_fd *fd; 1706 struct dpaa2_sec_qp *dpaa2_qp; 1707 1708 dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq); 1709 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); 1710 op = sec_fd_to_mbuf(fd, dpaa2_qp); 1711 /* Instead of freeing, enqueue it to the sec tx queue (sec->core) 1712 * after setting an error in FD. But this will have performance impact. 1713 */ 1714 rte_pktmbuf_free(op->sym->m_src); 1715 } 1716 1717 static void 1718 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, 1719 struct rte_mbuf *m, 1720 struct qbman_eq_desc *eqdesc) 1721 { 1722 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 1723 struct eqresp_metadata *eqresp_meta; 1724 struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private; 1725 uint16_t orpid, seqnum; 1726 uint8_t dq_idx; 1727 1728 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) { 1729 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >> 1730 DPAA2_EQCR_OPRID_SHIFT; 1731 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >> 1732 DPAA2_EQCR_SEQNUM_SHIFT; 1733 1734 1735 if (!priv->en_loose_ordered) { 1736 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0); 1737 qbman_eq_desc_set_response(eqdesc, (uint64_t) 1738 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[ 1739 dpio_dev->eqresp_pi]), 1); 1740 qbman_eq_desc_set_token(eqdesc, 1); 1741 1742 eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi]; 1743 eqresp_meta->dpaa2_q = dpaa2_q; 1744 eqresp_meta->mp = m->pool; 1745 1746 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ? 1747 dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0); 1748 } else { 1749 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); 1750 } 1751 } else { 1752 dq_idx = *dpaa2_seqn(m) - 1; 1753 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); 1754 DPAA2_PER_LCORE_DQRR_SIZE--; 1755 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); 1756 } 1757 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN; 1758 } 1759 1760 1761 static uint16_t 1762 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops, 1763 uint16_t nb_ops) 1764 { 1765 /* Function to transmit the frames to given device and VQ*/ 1766 uint32_t loop; 1767 int32_t ret; 1768 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1769 uint32_t frames_to_send, num_free_eq_desc, retry_count; 1770 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; 1771 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1772 struct qbman_swp *swp; 1773 uint16_t num_tx = 0; 1774 uint16_t bpid; 1775 struct rte_mempool *mb_pool; 1776 struct dpaa2_sec_dev_private *priv = 1777 dpaa2_qp->tx_vq.crypto_data->dev_private; 1778 1779 if (unlikely(nb_ops == 0)) 1780 return 0; 1781 1782 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1783 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1784 return 0; 1785 } 1786 1787 if (!DPAA2_PER_LCORE_DPIO) { 1788 ret = dpaa2_affine_qbman_swp(); 1789 if (ret) { 1790 DPAA2_SEC_ERR("Failure in affining portal"); 1791 return 0; 1792 } 1793 } 1794 swp = DPAA2_PER_LCORE_PORTAL; 1795 1796 while (nb_ops) { 1797 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1798 dpaa2_eqcr_size : nb_ops; 1799 1800 if (!priv->en_loose_ordered) { 1801 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1802 num_free_eq_desc = dpaa2_free_eq_descriptors(); 1803 if (num_free_eq_desc < frames_to_send) 1804 frames_to_send = num_free_eq_desc; 1805 } 1806 } 1807 1808 for (loop = 0; loop < frames_to_send; loop++) { 1809 /*Prepare enqueue descriptor*/ 1810 qbman_eq_desc_clear(&eqdesc[loop]); 1811 qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid); 1812 1813 if (*dpaa2_seqn((*ops)->sym->m_src)) 1814 dpaa2_sec_set_enqueue_descriptor( 1815 &dpaa2_qp->tx_vq, 1816 (*ops)->sym->m_src, 1817 &eqdesc[loop]); 1818 else 1819 qbman_eq_desc_set_no_orp(&eqdesc[loop], 1820 DPAA2_EQ_RESP_ERR_FQ); 1821 1822 /*Clear the unused FD fields before sending*/ 1823 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1824 mb_pool = (*ops)->sym->m_src->pool; 1825 bpid = mempool_to_bpid(mb_pool); 1826 ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); 1827 if (ret) { 1828 DPAA2_SEC_DP_DEBUG("FD build failed"); 1829 goto skip_tx; 1830 } 1831 ops++; 1832 } 1833 1834 loop = 0; 1835 retry_count = 0; 1836 while (loop < frames_to_send) { 1837 ret = qbman_swp_enqueue_multiple_desc(swp, 1838 &eqdesc[loop], &fd_arr[loop], 1839 frames_to_send - loop); 1840 if (unlikely(ret < 0)) { 1841 retry_count++; 1842 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1843 num_tx += loop; 1844 nb_ops -= loop; 1845 DPAA2_SEC_DP_DEBUG("Enqueue fail"); 1846 /* freeing the fle buffers */ 1847 while (loop < frames_to_send) { 1848 free_fle(&fd_arr[loop], 1849 dpaa2_qp); 1850 loop++; 1851 } 1852 goto skip_tx; 1853 } 1854 } else { 1855 loop += ret; 1856 retry_count = 0; 1857 } 1858 } 1859 1860 num_tx += loop; 1861 nb_ops -= loop; 1862 } 1863 1864 skip_tx: 1865 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1866 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1867 return num_tx; 1868 } 1869 1870 static uint16_t 1871 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1872 uint16_t nb_ops) 1873 { 1874 /* Function is responsible to receive frames for a given device and VQ*/ 1875 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1876 struct qbman_result *dq_storage; 1877 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1878 int ret, num_rx = 0; 1879 uint8_t is_last = 0, status; 1880 struct qbman_swp *swp; 1881 const struct qbman_fd *fd; 1882 struct qbman_pull_desc pulldesc; 1883 1884 if (!DPAA2_PER_LCORE_DPIO) { 1885 ret = dpaa2_affine_qbman_swp(); 1886 if (ret) { 1887 DPAA2_SEC_ERR( 1888 "Failed to allocate IO portal, tid: %d", 1889 rte_gettid()); 1890 return 0; 1891 } 1892 } 1893 swp = DPAA2_PER_LCORE_PORTAL; 1894 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1895 1896 qbman_pull_desc_clear(&pulldesc); 1897 qbman_pull_desc_set_numframes(&pulldesc, 1898 (nb_ops > dpaa2_dqrr_size) ? 1899 dpaa2_dqrr_size : nb_ops); 1900 qbman_pull_desc_set_fq(&pulldesc, fqid); 1901 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1902 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1903 1); 1904 1905 /*Issue a volatile dequeue command. */ 1906 while (1) { 1907 if (qbman_swp_pull(swp, &pulldesc)) { 1908 DPAA2_SEC_WARN( 1909 "SEC VDQ command is not issued : QBMAN busy"); 1910 /* Portal was busy, try again */ 1911 continue; 1912 } 1913 break; 1914 }; 1915 1916 /* Receive the packets till Last Dequeue entry is found with 1917 * respect to the above issues PULL command. 1918 */ 1919 while (!is_last) { 1920 /* Check if the previous issued command is completed. 1921 * Also seems like the SWP is shared between the Ethernet Driver 1922 * and the SEC driver. 1923 */ 1924 while (!qbman_check_command_complete(dq_storage)) 1925 ; 1926 1927 /* Loop until the dq_storage is updated with 1928 * new token by QBMAN 1929 */ 1930 while (!qbman_check_new_result(dq_storage)) 1931 ; 1932 /* Check whether Last Pull command is Expired and 1933 * setting Condition for Loop termination 1934 */ 1935 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1936 is_last = 1; 1937 /* Check for valid frame. */ 1938 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1939 if (unlikely( 1940 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1941 DPAA2_SEC_DP_DEBUG("No frame is delivered"); 1942 continue; 1943 } 1944 } 1945 1946 fd = qbman_result_DQ_fd(dq_storage); 1947 ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp); 1948 1949 if (unlikely(fd->simple.frc)) { 1950 /* TODO Parse SEC errors */ 1951 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) { 1952 DPAA2_SEC_DP_ERR("SEC returned Error - %x", 1953 fd->simple.frc); 1954 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP) 1955 dpaa2_sec_dump(ops[num_rx], stdout); 1956 } 1957 1958 dpaa2_qp->rx_vq.err_pkts += 1; 1959 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1960 } else { 1961 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1962 } 1963 1964 num_rx++; 1965 dq_storage++; 1966 } /* End of Packet Rx loop */ 1967 1968 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1969 1970 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx, 1971 dpaa2_qp->rx_vq.err_pkts); 1972 /*Return the total number of packets received to DPAA2 app*/ 1973 return num_rx; 1974 } 1975 1976 /** Release queue pair */ 1977 static int 1978 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1979 { 1980 struct dpaa2_sec_qp *qp = 1981 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1982 1983 PMD_INIT_FUNC_TRACE(); 1984 1985 if (qp->rx_vq.q_storage) { 1986 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1987 rte_free(qp->rx_vq.q_storage); 1988 } 1989 rte_mempool_free(qp->fle_pool); 1990 rte_free(qp); 1991 1992 dev->data->queue_pairs[queue_pair_id] = NULL; 1993 1994 return 0; 1995 } 1996 1997 /** Setup a queue pair */ 1998 static int 1999 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 2000 const struct rte_cryptodev_qp_conf *qp_conf, 2001 __rte_unused int socket_id) 2002 { 2003 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2004 struct dpaa2_sec_qp *qp; 2005 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2006 struct dpseci_rx_queue_cfg cfg; 2007 int32_t retcode; 2008 char str[30]; 2009 2010 PMD_INIT_FUNC_TRACE(); 2011 2012 /* If qp is already in use free ring memory and qp metadata. */ 2013 if (dev->data->queue_pairs[qp_id] != NULL) { 2014 DPAA2_SEC_INFO("QP already setup"); 2015 return 0; 2016 } 2017 2018 if (qp_conf->nb_descriptors < (2 * FLE_POOL_CACHE_SIZE)) { 2019 DPAA2_SEC_ERR("Minimum supported nb_descriptors %d," 2020 " but given %d", (2 * FLE_POOL_CACHE_SIZE), 2021 qp_conf->nb_descriptors); 2022 return -EINVAL; 2023 } 2024 2025 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 2026 dev, qp_id, qp_conf); 2027 2028 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 2029 2030 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 2031 RTE_CACHE_LINE_SIZE); 2032 if (!qp) { 2033 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 2034 return -ENOMEM; 2035 } 2036 2037 qp->rx_vq.crypto_data = dev->data; 2038 qp->tx_vq.crypto_data = dev->data; 2039 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 2040 sizeof(struct queue_storage_info_t), 2041 RTE_CACHE_LINE_SIZE); 2042 if (!qp->rx_vq.q_storage) { 2043 DPAA2_SEC_ERR("malloc failed for q_storage"); 2044 return -ENOMEM; 2045 } 2046 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 2047 2048 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 2049 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 2050 return -ENOMEM; 2051 } 2052 2053 dev->data->queue_pairs[qp_id] = qp; 2054 2055 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d", 2056 getpid(), dev->data->dev_id, qp_id); 2057 qp->fle_pool = rte_mempool_create((const char *)str, 2058 qp_conf->nb_descriptors, 2059 FLE_POOL_BUF_SIZE, 2060 FLE_POOL_CACHE_SIZE, 0, 2061 NULL, NULL, NULL, NULL, 2062 SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET); 2063 if (!qp->fle_pool) { 2064 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 2065 return -ENOMEM; 2066 } 2067 2068 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 2069 cfg.user_ctx = (size_t)(&qp->rx_vq); 2070 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 2071 qp_id, &cfg); 2072 return retcode; 2073 } 2074 2075 /** Returns the size of the aesni gcm session structure */ 2076 static unsigned int 2077 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 2078 { 2079 PMD_INIT_FUNC_TRACE(); 2080 2081 return sizeof(dpaa2_sec_session); 2082 } 2083 2084 static int 2085 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform, 2086 dpaa2_sec_session *session) 2087 { 2088 struct alginfo cipherdata; 2089 int bufsize, ret = 0; 2090 struct ctxt_priv *priv; 2091 struct sec_flow_context *flc; 2092 2093 PMD_INIT_FUNC_TRACE(); 2094 2095 /* For SEC CIPHER only one descriptor is required. */ 2096 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2097 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2098 RTE_CACHE_LINE_SIZE); 2099 if (priv == NULL) { 2100 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2101 return -ENOMEM; 2102 } 2103 2104 flc = &priv->flc_desc[0].flc; 2105 2106 session->ctxt_type = DPAA2_SEC_CIPHER; 2107 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2108 RTE_CACHE_LINE_SIZE); 2109 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2110 DPAA2_SEC_ERR("No Memory for cipher key"); 2111 rte_free(priv); 2112 return -ENOMEM; 2113 } 2114 session->cipher_key.length = xform->cipher.key.length; 2115 2116 memcpy(session->cipher_key.data, xform->cipher.key.data, 2117 xform->cipher.key.length); 2118 cipherdata.key = (size_t)session->cipher_key.data; 2119 cipherdata.keylen = session->cipher_key.length; 2120 cipherdata.key_enc_flags = 0; 2121 cipherdata.key_type = RTA_DATA_IMM; 2122 2123 /* Set IV parameters */ 2124 session->iv.offset = xform->cipher.iv.offset; 2125 session->iv.length = xform->cipher.iv.length; 2126 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2127 DIR_ENC : DIR_DEC; 2128 2129 switch (xform->cipher.algo) { 2130 case RTE_CRYPTO_CIPHER_AES_CBC: 2131 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2132 cipherdata.algmode = OP_ALG_AAI_CBC; 2133 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2134 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2135 SHR_NEVER, &cipherdata, 2136 session->iv.length, 2137 session->dir); 2138 break; 2139 case RTE_CRYPTO_CIPHER_3DES_CBC: 2140 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2141 cipherdata.algmode = OP_ALG_AAI_CBC; 2142 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2143 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2144 SHR_NEVER, &cipherdata, 2145 session->iv.length, 2146 session->dir); 2147 break; 2148 case RTE_CRYPTO_CIPHER_DES_CBC: 2149 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2150 cipherdata.algmode = OP_ALG_AAI_CBC; 2151 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2152 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2153 SHR_NEVER, &cipherdata, 2154 session->iv.length, 2155 session->dir); 2156 break; 2157 case RTE_CRYPTO_CIPHER_AES_CTR: 2158 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2159 cipherdata.algmode = OP_ALG_AAI_CTR; 2160 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2161 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2162 SHR_NEVER, &cipherdata, 2163 session->iv.length, 2164 session->dir); 2165 break; 2166 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2167 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 2168 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 2169 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 2170 &cipherdata, 2171 session->dir); 2172 break; 2173 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2174 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 2175 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 2176 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 2177 &cipherdata, 2178 session->dir); 2179 break; 2180 default: 2181 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 2182 rte_cryptodev_get_cipher_algo_string(xform->cipher.algo), 2183 xform->cipher.algo); 2184 ret = -ENOTSUP; 2185 goto error_out; 2186 } 2187 2188 if (bufsize < 0) { 2189 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 2190 ret = -EINVAL; 2191 goto error_out; 2192 } 2193 2194 flc->word1_sdl = (uint8_t)bufsize; 2195 session->ctxt = priv; 2196 2197 #ifdef CAAM_DESC_DEBUG 2198 int i; 2199 for (i = 0; i < bufsize; i++) 2200 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 2201 #endif 2202 return ret; 2203 2204 error_out: 2205 rte_free(session->cipher_key.data); 2206 rte_free(priv); 2207 return ret; 2208 } 2209 2210 static int 2211 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform, 2212 dpaa2_sec_session *session) 2213 { 2214 struct alginfo authdata; 2215 int bufsize, ret = 0; 2216 struct ctxt_priv *priv; 2217 struct sec_flow_context *flc; 2218 2219 PMD_INIT_FUNC_TRACE(); 2220 2221 memset(&authdata, 0, sizeof(authdata)); 2222 2223 /* For SEC AUTH three descriptors are required for various stages */ 2224 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2225 sizeof(struct ctxt_priv) + 3 * 2226 sizeof(struct sec_flc_desc), 2227 RTE_CACHE_LINE_SIZE); 2228 if (priv == NULL) { 2229 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2230 return -ENOMEM; 2231 } 2232 2233 flc = &priv->flc_desc[DESC_INITFINAL].flc; 2234 2235 session->ctxt_type = DPAA2_SEC_AUTH; 2236 session->auth_key.length = xform->auth.key.length; 2237 if (xform->auth.key.length) { 2238 session->auth_key.data = rte_zmalloc(NULL, 2239 xform->auth.key.length, 2240 RTE_CACHE_LINE_SIZE); 2241 if (session->auth_key.data == NULL) { 2242 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 2243 rte_free(priv); 2244 return -ENOMEM; 2245 } 2246 memcpy(session->auth_key.data, xform->auth.key.data, 2247 xform->auth.key.length); 2248 authdata.key = (size_t)session->auth_key.data; 2249 authdata.key_enc_flags = 0; 2250 authdata.key_type = RTA_DATA_IMM; 2251 } 2252 authdata.keylen = session->auth_key.length; 2253 2254 session->digest_length = xform->auth.digest_length; 2255 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2256 DIR_ENC : DIR_DEC; 2257 2258 switch (xform->auth.algo) { 2259 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2260 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2261 authdata.algmode = OP_ALG_AAI_HMAC; 2262 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2263 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2264 1, 0, SHR_NEVER, &authdata, 2265 !session->dir, 2266 session->digest_length); 2267 break; 2268 case RTE_CRYPTO_AUTH_MD5_HMAC: 2269 authdata.algtype = OP_ALG_ALGSEL_MD5; 2270 authdata.algmode = OP_ALG_AAI_HMAC; 2271 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2272 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2273 1, 0, SHR_NEVER, &authdata, 2274 !session->dir, 2275 session->digest_length); 2276 break; 2277 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2278 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2279 authdata.algmode = OP_ALG_AAI_HMAC; 2280 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2281 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2282 1, 0, SHR_NEVER, &authdata, 2283 !session->dir, 2284 session->digest_length); 2285 break; 2286 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2287 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2288 authdata.algmode = OP_ALG_AAI_HMAC; 2289 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2290 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2291 1, 0, SHR_NEVER, &authdata, 2292 !session->dir, 2293 session->digest_length); 2294 break; 2295 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2296 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2297 authdata.algmode = OP_ALG_AAI_HMAC; 2298 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2299 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2300 1, 0, SHR_NEVER, &authdata, 2301 !session->dir, 2302 session->digest_length); 2303 break; 2304 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2305 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2306 authdata.algmode = OP_ALG_AAI_HMAC; 2307 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2308 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2309 1, 0, SHR_NEVER, &authdata, 2310 !session->dir, 2311 session->digest_length); 2312 break; 2313 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2314 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2315 authdata.algmode = OP_ALG_AAI_F9; 2316 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2317 session->iv.offset = xform->auth.iv.offset; 2318 session->iv.length = xform->auth.iv.length; 2319 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2320 1, 0, &authdata, 2321 !session->dir, 2322 session->digest_length); 2323 break; 2324 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2325 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2326 authdata.algmode = OP_ALG_AAI_F9; 2327 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2328 session->iv.offset = xform->auth.iv.offset; 2329 session->iv.length = xform->auth.iv.length; 2330 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2331 1, 0, &authdata, 2332 !session->dir, 2333 session->digest_length); 2334 break; 2335 case RTE_CRYPTO_AUTH_SHA1: 2336 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2337 authdata.algmode = OP_ALG_AAI_HASH; 2338 session->auth_alg = RTE_CRYPTO_AUTH_SHA1; 2339 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2340 1, 0, SHR_NEVER, &authdata, 2341 !session->dir, 2342 session->digest_length); 2343 break; 2344 case RTE_CRYPTO_AUTH_MD5: 2345 authdata.algtype = OP_ALG_ALGSEL_MD5; 2346 authdata.algmode = OP_ALG_AAI_HASH; 2347 session->auth_alg = RTE_CRYPTO_AUTH_MD5; 2348 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2349 1, 0, SHR_NEVER, &authdata, 2350 !session->dir, 2351 session->digest_length); 2352 break; 2353 case RTE_CRYPTO_AUTH_SHA256: 2354 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2355 authdata.algmode = OP_ALG_AAI_HASH; 2356 session->auth_alg = RTE_CRYPTO_AUTH_SHA256; 2357 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2358 1, 0, SHR_NEVER, &authdata, 2359 !session->dir, 2360 session->digest_length); 2361 break; 2362 case RTE_CRYPTO_AUTH_SHA384: 2363 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2364 authdata.algmode = OP_ALG_AAI_HASH; 2365 session->auth_alg = RTE_CRYPTO_AUTH_SHA384; 2366 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2367 1, 0, SHR_NEVER, &authdata, 2368 !session->dir, 2369 session->digest_length); 2370 break; 2371 case RTE_CRYPTO_AUTH_SHA512: 2372 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2373 authdata.algmode = OP_ALG_AAI_HASH; 2374 session->auth_alg = RTE_CRYPTO_AUTH_SHA512; 2375 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2376 1, 0, SHR_NEVER, &authdata, 2377 !session->dir, 2378 session->digest_length); 2379 break; 2380 case RTE_CRYPTO_AUTH_SHA224: 2381 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2382 authdata.algmode = OP_ALG_AAI_HASH; 2383 session->auth_alg = RTE_CRYPTO_AUTH_SHA224; 2384 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2385 1, 0, SHR_NEVER, &authdata, 2386 !session->dir, 2387 session->digest_length); 2388 break; 2389 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2390 authdata.algtype = OP_ALG_ALGSEL_AES; 2391 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2392 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2393 bufsize = cnstr_shdsc_aes_mac( 2394 priv->flc_desc[DESC_INITFINAL].desc, 2395 1, 0, SHR_NEVER, &authdata, 2396 !session->dir, 2397 session->digest_length); 2398 break; 2399 case RTE_CRYPTO_AUTH_AES_CMAC: 2400 authdata.algtype = OP_ALG_ALGSEL_AES; 2401 authdata.algmode = OP_ALG_AAI_CMAC; 2402 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2403 bufsize = cnstr_shdsc_aes_mac( 2404 priv->flc_desc[DESC_INITFINAL].desc, 2405 1, 0, SHR_NEVER, &authdata, 2406 !session->dir, 2407 session->digest_length); 2408 break; 2409 default: 2410 DPAA2_SEC_ERR("Crypto: Unsupported Auth alg %s (%u)", 2411 rte_cryptodev_get_auth_algo_string(xform->auth.algo), 2412 xform->auth.algo); 2413 ret = -ENOTSUP; 2414 goto error_out; 2415 } 2416 2417 if (bufsize < 0) { 2418 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2419 ret = -EINVAL; 2420 goto error_out; 2421 } 2422 2423 flc->word1_sdl = (uint8_t)bufsize; 2424 session->ctxt = priv; 2425 #ifdef CAAM_DESC_DEBUG 2426 int i; 2427 for (i = 0; i < bufsize; i++) 2428 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2429 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2430 #endif 2431 2432 return ret; 2433 2434 error_out: 2435 rte_free(session->auth_key.data); 2436 rte_free(priv); 2437 return ret; 2438 } 2439 2440 static int 2441 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform, 2442 dpaa2_sec_session *session) 2443 { 2444 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2445 struct alginfo aeaddata; 2446 int bufsize; 2447 struct ctxt_priv *priv; 2448 struct sec_flow_context *flc; 2449 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2450 int err, ret = 0; 2451 2452 PMD_INIT_FUNC_TRACE(); 2453 2454 /* Set IV parameters */ 2455 session->iv.offset = aead_xform->iv.offset; 2456 session->iv.length = aead_xform->iv.length; 2457 session->ctxt_type = DPAA2_SEC_AEAD; 2458 2459 /* For SEC AEAD only one descriptor is required */ 2460 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2461 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2462 RTE_CACHE_LINE_SIZE); 2463 if (priv == NULL) { 2464 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2465 return -ENOMEM; 2466 } 2467 2468 flc = &priv->flc_desc[0].flc; 2469 2470 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2471 RTE_CACHE_LINE_SIZE); 2472 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2473 DPAA2_SEC_ERR("No Memory for aead key"); 2474 rte_free(priv); 2475 return -ENOMEM; 2476 } 2477 memcpy(session->aead_key.data, aead_xform->key.data, 2478 aead_xform->key.length); 2479 2480 session->digest_length = aead_xform->digest_length; 2481 session->aead_key.length = aead_xform->key.length; 2482 ctxt->auth_only_len = aead_xform->aad_length; 2483 2484 aeaddata.key = (size_t)session->aead_key.data; 2485 aeaddata.keylen = session->aead_key.length; 2486 aeaddata.key_enc_flags = 0; 2487 aeaddata.key_type = RTA_DATA_IMM; 2488 2489 switch (aead_xform->algo) { 2490 case RTE_CRYPTO_AEAD_AES_GCM: 2491 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2492 aeaddata.algmode = OP_ALG_AAI_GCM; 2493 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2494 break; 2495 default: 2496 2497 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %s (%u)", 2498 rte_cryptodev_get_aead_algo_string(aead_xform->algo), 2499 aead_xform->algo); 2500 ret = -ENOTSUP; 2501 goto error_out; 2502 } 2503 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2504 DIR_ENC : DIR_DEC; 2505 2506 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2507 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2508 DESC_JOB_IO_LEN, 2509 (unsigned int *)priv->flc_desc[0].desc, 2510 &priv->flc_desc[0].desc[1], 1); 2511 2512 if (err < 0) { 2513 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2514 ret = -EINVAL; 2515 goto error_out; 2516 } 2517 if (priv->flc_desc[0].desc[1] & 1) { 2518 aeaddata.key_type = RTA_DATA_IMM; 2519 } else { 2520 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2521 aeaddata.key_type = RTA_DATA_PTR; 2522 } 2523 priv->flc_desc[0].desc[0] = 0; 2524 priv->flc_desc[0].desc[1] = 0; 2525 2526 if (session->dir == DIR_ENC) 2527 bufsize = cnstr_shdsc_gcm_encap( 2528 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2529 &aeaddata, session->iv.length, 2530 session->digest_length); 2531 else 2532 bufsize = cnstr_shdsc_gcm_decap( 2533 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2534 &aeaddata, session->iv.length, 2535 session->digest_length); 2536 if (bufsize < 0) { 2537 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2538 ret = -EINVAL; 2539 goto error_out; 2540 } 2541 2542 flc->word1_sdl = (uint8_t)bufsize; 2543 session->ctxt = priv; 2544 #ifdef CAAM_DESC_DEBUG 2545 int i; 2546 for (i = 0; i < bufsize; i++) 2547 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2548 i, priv->flc_desc[0].desc[i]); 2549 #endif 2550 return ret; 2551 2552 error_out: 2553 rte_free(session->aead_key.data); 2554 rte_free(priv); 2555 return ret; 2556 } 2557 2558 2559 static int 2560 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform, 2561 dpaa2_sec_session *session) 2562 { 2563 struct alginfo authdata, cipherdata; 2564 int bufsize; 2565 struct ctxt_priv *priv; 2566 struct sec_flow_context *flc; 2567 struct rte_crypto_cipher_xform *cipher_xform; 2568 struct rte_crypto_auth_xform *auth_xform; 2569 int err, ret = 0; 2570 2571 PMD_INIT_FUNC_TRACE(); 2572 2573 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2574 cipher_xform = &xform->cipher; 2575 auth_xform = &xform->next->auth; 2576 session->ctxt_type = 2577 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2578 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2579 } else { 2580 cipher_xform = &xform->next->cipher; 2581 auth_xform = &xform->auth; 2582 session->ctxt_type = 2583 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2584 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2585 } 2586 2587 /* Set IV parameters */ 2588 session->iv.offset = cipher_xform->iv.offset; 2589 session->iv.length = cipher_xform->iv.length; 2590 2591 /* For SEC AEAD only one descriptor is required */ 2592 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2593 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2594 RTE_CACHE_LINE_SIZE); 2595 if (priv == NULL) { 2596 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2597 return -ENOMEM; 2598 } 2599 2600 flc = &priv->flc_desc[0].flc; 2601 2602 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2603 RTE_CACHE_LINE_SIZE); 2604 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2605 DPAA2_SEC_ERR("No Memory for cipher key"); 2606 rte_free(priv); 2607 return -ENOMEM; 2608 } 2609 session->cipher_key.length = cipher_xform->key.length; 2610 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2611 RTE_CACHE_LINE_SIZE); 2612 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2613 DPAA2_SEC_ERR("No Memory for auth key"); 2614 rte_free(session->cipher_key.data); 2615 rte_free(priv); 2616 return -ENOMEM; 2617 } 2618 session->auth_key.length = auth_xform->key.length; 2619 memcpy(session->cipher_key.data, cipher_xform->key.data, 2620 cipher_xform->key.length); 2621 memcpy(session->auth_key.data, auth_xform->key.data, 2622 auth_xform->key.length); 2623 2624 authdata.key = (size_t)session->auth_key.data; 2625 authdata.keylen = session->auth_key.length; 2626 authdata.key_enc_flags = 0; 2627 authdata.key_type = RTA_DATA_IMM; 2628 2629 session->digest_length = auth_xform->digest_length; 2630 2631 switch (auth_xform->algo) { 2632 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2633 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2634 authdata.algmode = OP_ALG_AAI_HMAC; 2635 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2636 break; 2637 case RTE_CRYPTO_AUTH_MD5_HMAC: 2638 authdata.algtype = OP_ALG_ALGSEL_MD5; 2639 authdata.algmode = OP_ALG_AAI_HMAC; 2640 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2641 break; 2642 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2643 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2644 authdata.algmode = OP_ALG_AAI_HMAC; 2645 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2646 break; 2647 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2648 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2649 authdata.algmode = OP_ALG_AAI_HMAC; 2650 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2651 break; 2652 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2653 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2654 authdata.algmode = OP_ALG_AAI_HMAC; 2655 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2656 break; 2657 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2658 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2659 authdata.algmode = OP_ALG_AAI_HMAC; 2660 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2661 break; 2662 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2663 authdata.algtype = OP_ALG_ALGSEL_AES; 2664 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2665 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2666 break; 2667 case RTE_CRYPTO_AUTH_AES_CMAC: 2668 authdata.algtype = OP_ALG_ALGSEL_AES; 2669 authdata.algmode = OP_ALG_AAI_CMAC; 2670 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2671 break; 2672 default: 2673 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %s (%u)", 2674 rte_cryptodev_get_auth_algo_string(auth_xform->algo), 2675 auth_xform->algo); 2676 ret = -ENOTSUP; 2677 goto error_out; 2678 } 2679 cipherdata.key = (size_t)session->cipher_key.data; 2680 cipherdata.keylen = session->cipher_key.length; 2681 cipherdata.key_enc_flags = 0; 2682 cipherdata.key_type = RTA_DATA_IMM; 2683 2684 switch (cipher_xform->algo) { 2685 case RTE_CRYPTO_CIPHER_AES_CBC: 2686 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2687 cipherdata.algmode = OP_ALG_AAI_CBC; 2688 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2689 break; 2690 case RTE_CRYPTO_CIPHER_3DES_CBC: 2691 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2692 cipherdata.algmode = OP_ALG_AAI_CBC; 2693 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2694 break; 2695 case RTE_CRYPTO_CIPHER_DES_CBC: 2696 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2697 cipherdata.algmode = OP_ALG_AAI_CBC; 2698 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2699 break; 2700 case RTE_CRYPTO_CIPHER_AES_CTR: 2701 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2702 cipherdata.algmode = OP_ALG_AAI_CTR; 2703 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2704 break; 2705 default: 2706 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)", 2707 rte_cryptodev_get_cipher_algo_string(cipher_xform->algo), 2708 cipher_xform->algo); 2709 ret = -ENOTSUP; 2710 goto error_out; 2711 } 2712 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2713 DIR_ENC : DIR_DEC; 2714 2715 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2716 priv->flc_desc[0].desc[1] = authdata.keylen; 2717 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2718 DESC_JOB_IO_LEN, 2719 (unsigned int *)priv->flc_desc[0].desc, 2720 &priv->flc_desc[0].desc[2], 2); 2721 2722 if (err < 0) { 2723 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2724 ret = -EINVAL; 2725 goto error_out; 2726 } 2727 if (priv->flc_desc[0].desc[2] & 1) { 2728 cipherdata.key_type = RTA_DATA_IMM; 2729 } else { 2730 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2731 cipherdata.key_type = RTA_DATA_PTR; 2732 } 2733 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2734 authdata.key_type = RTA_DATA_IMM; 2735 } else { 2736 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2737 authdata.key_type = RTA_DATA_PTR; 2738 } 2739 priv->flc_desc[0].desc[0] = 0; 2740 priv->flc_desc[0].desc[1] = 0; 2741 priv->flc_desc[0].desc[2] = 0; 2742 2743 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2744 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2745 0, SHR_SERIAL, 2746 &cipherdata, &authdata, 2747 session->iv.length, 2748 session->digest_length, 2749 session->dir); 2750 if (bufsize < 0) { 2751 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2752 ret = -EINVAL; 2753 goto error_out; 2754 } 2755 } else { 2756 DPAA2_SEC_ERR("Hash before cipher not supported"); 2757 ret = -ENOTSUP; 2758 goto error_out; 2759 } 2760 2761 flc->word1_sdl = (uint8_t)bufsize; 2762 session->ctxt = priv; 2763 #ifdef CAAM_DESC_DEBUG 2764 int i; 2765 for (i = 0; i < bufsize; i++) 2766 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2767 i, priv->flc_desc[0].desc[i]); 2768 #endif 2769 2770 return ret; 2771 2772 error_out: 2773 rte_free(session->cipher_key.data); 2774 rte_free(session->auth_key.data); 2775 rte_free(priv); 2776 return ret; 2777 } 2778 2779 static int 2780 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess) 2781 { 2782 dpaa2_sec_session *session = sess; 2783 int ret; 2784 2785 PMD_INIT_FUNC_TRACE(); 2786 2787 if (unlikely(sess == NULL)) { 2788 DPAA2_SEC_ERR("Invalid session struct"); 2789 return -EINVAL; 2790 } 2791 2792 memset(session, 0, sizeof(dpaa2_sec_session)); 2793 /* Default IV length = 0 */ 2794 session->iv.length = 0; 2795 2796 /* Cipher Only */ 2797 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2798 ret = dpaa2_sec_cipher_init(xform, session); 2799 2800 /* Authentication Only */ 2801 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2802 xform->next == NULL) { 2803 ret = dpaa2_sec_auth_init(xform, session); 2804 2805 /* Cipher then Authenticate */ 2806 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2807 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2808 session->ext_params.aead_ctxt.auth_cipher_text = true; 2809 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2810 ret = dpaa2_sec_auth_init(xform, session); 2811 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2812 ret = dpaa2_sec_cipher_init(xform, session); 2813 else 2814 ret = dpaa2_sec_aead_chain_init(xform, session); 2815 /* Authenticate then Cipher */ 2816 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2817 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2818 session->ext_params.aead_ctxt.auth_cipher_text = false; 2819 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2820 ret = dpaa2_sec_cipher_init(xform, session); 2821 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2822 ret = dpaa2_sec_auth_init(xform, session); 2823 else 2824 ret = dpaa2_sec_aead_chain_init(xform, session); 2825 /* AEAD operation for AES-GCM kind of Algorithms */ 2826 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2827 xform->next == NULL) { 2828 ret = dpaa2_sec_aead_init(xform, session); 2829 2830 } else { 2831 DPAA2_SEC_ERR("Invalid crypto type"); 2832 return -EINVAL; 2833 } 2834 2835 return ret; 2836 } 2837 2838 static int 2839 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2840 dpaa2_sec_session *session, 2841 struct alginfo *aeaddata) 2842 { 2843 PMD_INIT_FUNC_TRACE(); 2844 2845 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2846 RTE_CACHE_LINE_SIZE); 2847 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2848 DPAA2_SEC_ERR("No Memory for aead key"); 2849 return -ENOMEM; 2850 } 2851 memcpy(session->aead_key.data, aead_xform->key.data, 2852 aead_xform->key.length); 2853 2854 session->digest_length = aead_xform->digest_length; 2855 session->aead_key.length = aead_xform->key.length; 2856 2857 aeaddata->key = (size_t)session->aead_key.data; 2858 aeaddata->keylen = session->aead_key.length; 2859 aeaddata->key_enc_flags = 0; 2860 aeaddata->key_type = RTA_DATA_IMM; 2861 2862 switch (aead_xform->algo) { 2863 case RTE_CRYPTO_AEAD_AES_GCM: 2864 switch (session->digest_length) { 2865 case 8: 2866 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2867 break; 2868 case 12: 2869 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2870 break; 2871 case 16: 2872 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2873 break; 2874 default: 2875 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2876 session->digest_length); 2877 return -EINVAL; 2878 } 2879 aeaddata->algmode = OP_ALG_AAI_GCM; 2880 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2881 break; 2882 case RTE_CRYPTO_AEAD_AES_CCM: 2883 switch (session->digest_length) { 2884 case 8: 2885 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2886 break; 2887 case 12: 2888 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2889 break; 2890 case 16: 2891 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2892 break; 2893 default: 2894 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2895 session->digest_length); 2896 return -EINVAL; 2897 } 2898 aeaddata->algmode = OP_ALG_AAI_CCM; 2899 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2900 break; 2901 default: 2902 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2903 aead_xform->algo); 2904 return -ENOTSUP; 2905 } 2906 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2907 DIR_ENC : DIR_DEC; 2908 2909 return 0; 2910 } 2911 2912 static int 2913 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2914 struct rte_crypto_auth_xform *auth_xform, 2915 dpaa2_sec_session *session, 2916 struct alginfo *cipherdata, 2917 struct alginfo *authdata) 2918 { 2919 if (cipher_xform) { 2920 session->cipher_key.data = rte_zmalloc(NULL, 2921 cipher_xform->key.length, 2922 RTE_CACHE_LINE_SIZE); 2923 if (session->cipher_key.data == NULL && 2924 cipher_xform->key.length > 0) { 2925 DPAA2_SEC_ERR("No Memory for cipher key"); 2926 return -ENOMEM; 2927 } 2928 2929 session->cipher_key.length = cipher_xform->key.length; 2930 memcpy(session->cipher_key.data, cipher_xform->key.data, 2931 cipher_xform->key.length); 2932 session->cipher_alg = cipher_xform->algo; 2933 } else { 2934 session->cipher_key.data = NULL; 2935 session->cipher_key.length = 0; 2936 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2937 } 2938 2939 if (auth_xform) { 2940 session->auth_key.data = rte_zmalloc(NULL, 2941 auth_xform->key.length, 2942 RTE_CACHE_LINE_SIZE); 2943 if (session->auth_key.data == NULL && 2944 auth_xform->key.length > 0) { 2945 DPAA2_SEC_ERR("No Memory for auth key"); 2946 return -ENOMEM; 2947 } 2948 session->auth_key.length = auth_xform->key.length; 2949 memcpy(session->auth_key.data, auth_xform->key.data, 2950 auth_xform->key.length); 2951 session->auth_alg = auth_xform->algo; 2952 session->digest_length = auth_xform->digest_length; 2953 } else { 2954 session->auth_key.data = NULL; 2955 session->auth_key.length = 0; 2956 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2957 } 2958 2959 authdata->key = (size_t)session->auth_key.data; 2960 authdata->keylen = session->auth_key.length; 2961 authdata->key_enc_flags = 0; 2962 authdata->key_type = RTA_DATA_IMM; 2963 switch (session->auth_alg) { 2964 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2965 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2966 authdata->algmode = OP_ALG_AAI_HMAC; 2967 break; 2968 case RTE_CRYPTO_AUTH_MD5_HMAC: 2969 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2970 authdata->algmode = OP_ALG_AAI_HMAC; 2971 break; 2972 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2973 authdata->algmode = OP_ALG_AAI_HMAC; 2974 if (session->digest_length == 6) 2975 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96; 2976 else if (session->digest_length == 14) 2977 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224; 2978 else 2979 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112; 2980 break; 2981 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2982 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2983 authdata->algmode = OP_ALG_AAI_HMAC; 2984 if (session->digest_length != 16) 2985 DPAA2_SEC_WARN( 2986 "+++Using sha256-hmac truncated len is non-standard," 2987 "it will not work with lookaside proto"); 2988 break; 2989 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2990 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2991 authdata->algmode = OP_ALG_AAI_HMAC; 2992 break; 2993 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2994 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2995 authdata->algmode = OP_ALG_AAI_HMAC; 2996 break; 2997 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2998 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96; 2999 authdata->algmode = OP_ALG_AAI_XCBC_MAC; 3000 break; 3001 case RTE_CRYPTO_AUTH_AES_CMAC: 3002 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 3003 authdata->algmode = OP_ALG_AAI_CMAC; 3004 break; 3005 case RTE_CRYPTO_AUTH_NULL: 3006 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 3007 break; 3008 default: 3009 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 3010 rte_cryptodev_get_auth_algo_string(session->auth_alg), 3011 session->auth_alg); 3012 return -ENOTSUP; 3013 } 3014 cipherdata->key = (size_t)session->cipher_key.data; 3015 cipherdata->keylen = session->cipher_key.length; 3016 cipherdata->key_enc_flags = 0; 3017 cipherdata->key_type = RTA_DATA_IMM; 3018 3019 switch (session->cipher_alg) { 3020 case RTE_CRYPTO_CIPHER_AES_CBC: 3021 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 3022 cipherdata->algmode = OP_ALG_AAI_CBC; 3023 break; 3024 case RTE_CRYPTO_CIPHER_3DES_CBC: 3025 cipherdata->algtype = OP_PCL_IPSEC_3DES; 3026 cipherdata->algmode = OP_ALG_AAI_CBC; 3027 break; 3028 case RTE_CRYPTO_CIPHER_DES_CBC: 3029 cipherdata->algtype = OP_PCL_IPSEC_DES; 3030 cipherdata->algmode = OP_ALG_AAI_CBC; 3031 break; 3032 case RTE_CRYPTO_CIPHER_AES_CTR: 3033 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 3034 cipherdata->algmode = OP_ALG_AAI_CTR; 3035 break; 3036 case RTE_CRYPTO_CIPHER_NULL: 3037 cipherdata->algtype = OP_PCL_IPSEC_NULL; 3038 break; 3039 default: 3040 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 3041 rte_cryptodev_get_cipher_algo_string(session->cipher_alg), 3042 session->cipher_alg); 3043 return -ENOTSUP; 3044 } 3045 3046 return 0; 3047 } 3048 3049 static int 3050 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 3051 struct rte_security_session_conf *conf, 3052 void *sess) 3053 { 3054 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 3055 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3056 struct rte_crypto_auth_xform *auth_xform = NULL; 3057 struct rte_crypto_aead_xform *aead_xform = NULL; 3058 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3059 struct ctxt_priv *priv; 3060 struct alginfo authdata, cipherdata; 3061 int bufsize; 3062 struct sec_flow_context *flc; 3063 int ret = -1; 3064 3065 PMD_INIT_FUNC_TRACE(); 3066 3067 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3068 sizeof(struct ctxt_priv) + 3069 sizeof(struct sec_flc_desc), 3070 RTE_CACHE_LINE_SIZE); 3071 3072 if (priv == NULL) { 3073 DPAA2_SEC_ERR("No memory for priv CTXT"); 3074 return -ENOMEM; 3075 } 3076 3077 flc = &priv->flc_desc[0].flc; 3078 3079 if (ipsec_xform->life.bytes_hard_limit != 0 || 3080 ipsec_xform->life.bytes_soft_limit != 0 || 3081 ipsec_xform->life.packets_hard_limit != 0 || 3082 ipsec_xform->life.packets_soft_limit != 0) 3083 return -ENOTSUP; 3084 3085 memset(session, 0, sizeof(dpaa2_sec_session)); 3086 3087 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3088 cipher_xform = &conf->crypto_xform->cipher; 3089 if (conf->crypto_xform->next) 3090 auth_xform = &conf->crypto_xform->next->auth; 3091 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3092 session, &cipherdata, &authdata); 3093 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3094 auth_xform = &conf->crypto_xform->auth; 3095 if (conf->crypto_xform->next) 3096 cipher_xform = &conf->crypto_xform->next->cipher; 3097 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3098 session, &cipherdata, &authdata); 3099 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 3100 aead_xform = &conf->crypto_xform->aead; 3101 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 3102 session, &cipherdata); 3103 authdata.keylen = 0; 3104 authdata.algtype = 0; 3105 } else { 3106 DPAA2_SEC_ERR("XFORM not specified"); 3107 ret = -EINVAL; 3108 goto out; 3109 } 3110 if (ret) { 3111 DPAA2_SEC_ERR("Failed to process xform"); 3112 goto out; 3113 } 3114 3115 session->ctxt_type = DPAA2_SEC_IPSEC; 3116 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 3117 uint8_t hdr[48] = {}; 3118 struct rte_ipv4_hdr *ip4_hdr; 3119 struct rte_ipv6_hdr *ip6_hdr; 3120 struct ipsec_encap_pdb encap_pdb; 3121 3122 flc->dhr = SEC_FLC_DHR_OUTBOUND; 3123 /* For Sec Proto only one descriptor is required. */ 3124 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 3125 3126 /* copy algo specific data to PDB */ 3127 switch (cipherdata.algtype) { 3128 case OP_PCL_IPSEC_AES_CTR: 3129 encap_pdb.ctr.ctr_initial = 0x00000001; 3130 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3131 break; 3132 case OP_PCL_IPSEC_AES_GCM8: 3133 case OP_PCL_IPSEC_AES_GCM12: 3134 case OP_PCL_IPSEC_AES_GCM16: 3135 memcpy(encap_pdb.gcm.salt, 3136 (uint8_t *)&(ipsec_xform->salt), 4); 3137 break; 3138 } 3139 3140 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 3141 PDBOPTS_ESP_OIHI_PDB_INL | 3142 PDBHMO_ESP_SNR; 3143 3144 if (ipsec_xform->options.iv_gen_disable == 0) 3145 encap_pdb.options |= PDBOPTS_ESP_IVSRC; 3146 /* Initializing the sequence number to 1, Security 3147 * engine will choose this sequence number for first packet 3148 * Refer: RFC4303 section: 3.3.3.Sequence Number Generation 3149 */ 3150 encap_pdb.seq_num = 1; 3151 if (ipsec_xform->options.esn) { 3152 encap_pdb.options |= PDBOPTS_ESP_ESN; 3153 encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3154 encap_pdb.seq_num = conf->ipsec.esn.low; 3155 } 3156 if (ipsec_xform->options.copy_dscp) 3157 encap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3158 if (ipsec_xform->options.ecn) 3159 encap_pdb.options |= PDBOPTS_ESP_TECN; 3160 encap_pdb.spi = ipsec_xform->spi; 3161 session->dir = DIR_ENC; 3162 if (ipsec_xform->tunnel.type == 3163 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3164 if (ipsec_xform->options.dec_ttl) 3165 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 3166 if (ipsec_xform->options.copy_df) 3167 encap_pdb.options |= PDBHMO_ESP_DFBIT; 3168 ip4_hdr = (struct rte_ipv4_hdr *)hdr; 3169 3170 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr); 3171 ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF; 3172 ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ? 3173 ipsec_xform->tunnel.ipv4.ttl : 0x40; 3174 ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp<<2); 3175 3176 ip4_hdr->hdr_checksum = 0; 3177 ip4_hdr->packet_id = 0; 3178 if (ipsec_xform->tunnel.ipv4.df) { 3179 uint16_t frag_off = 0; 3180 3181 frag_off |= RTE_IPV4_HDR_DF_FLAG; 3182 ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off); 3183 } else 3184 ip4_hdr->fragment_offset = 0; 3185 3186 memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip, 3187 sizeof(struct in_addr)); 3188 memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip, 3189 sizeof(struct in_addr)); 3190 if (ipsec_xform->options.udp_encap) { 3191 uint16_t sport, dport; 3192 struct rte_udp_hdr *uh = 3193 (struct rte_udp_hdr *) (hdr + 3194 sizeof(struct rte_ipv4_hdr)); 3195 3196 sport = ipsec_xform->udp.sport ? 3197 ipsec_xform->udp.sport : 4500; 3198 dport = ipsec_xform->udp.dport ? 3199 ipsec_xform->udp.dport : 4500; 3200 uh->src_port = rte_cpu_to_be_16(sport); 3201 uh->dst_port = rte_cpu_to_be_16(dport); 3202 uh->dgram_len = 0; 3203 uh->dgram_cksum = 0; 3204 3205 ip4_hdr->next_proto_id = IPPROTO_UDP; 3206 ip4_hdr->total_length = 3207 rte_cpu_to_be_16( 3208 sizeof(struct rte_ipv4_hdr) + 3209 sizeof(struct rte_udp_hdr)); 3210 encap_pdb.ip_hdr_len += 3211 sizeof(struct rte_udp_hdr); 3212 encap_pdb.options |= 3213 PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; 3214 } else { 3215 ip4_hdr->total_length = 3216 rte_cpu_to_be_16( 3217 sizeof(struct rte_ipv4_hdr)); 3218 ip4_hdr->next_proto_id = IPPROTO_ESP; 3219 } 3220 3221 ip4_hdr->hdr_checksum = calc_chksum((uint16_t *) 3222 (void *)ip4_hdr, sizeof(struct rte_ipv4_hdr)); 3223 3224 } else if (ipsec_xform->tunnel.type == 3225 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 3226 ip6_hdr = (struct rte_ipv6_hdr *)hdr; 3227 3228 ip6_hdr->vtc_flow = rte_cpu_to_be_32( 3229 DPAA2_IPv6_DEFAULT_VTC_FLOW | 3230 ((ipsec_xform->tunnel.ipv6.dscp << 3231 RTE_IPV6_HDR_TC_SHIFT) & 3232 RTE_IPV6_HDR_TC_MASK) | 3233 ((ipsec_xform->tunnel.ipv6.flabel << 3234 RTE_IPV6_HDR_FL_SHIFT) & 3235 RTE_IPV6_HDR_FL_MASK)); 3236 /* Payload length will be updated by HW */ 3237 ip6_hdr->payload_len = 0; 3238 ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ? 3239 ipsec_xform->tunnel.ipv6.hlimit : 0x40; 3240 ip6_hdr->proto = (ipsec_xform->proto == 3241 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 3242 IPPROTO_ESP : IPPROTO_AH; 3243 memcpy(&ip6_hdr->src_addr, 3244 &ipsec_xform->tunnel.ipv6.src_addr, 16); 3245 memcpy(&ip6_hdr->dst_addr, 3246 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 3247 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 3248 } 3249 3250 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 3251 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3252 SHR_WAIT : SHR_SERIAL, &encap_pdb, 3253 hdr, &cipherdata, &authdata); 3254 } else if (ipsec_xform->direction == 3255 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 3256 struct ipsec_decap_pdb decap_pdb; 3257 3258 flc->dhr = SEC_FLC_DHR_INBOUND; 3259 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 3260 /* copy algo specific data to PDB */ 3261 switch (cipherdata.algtype) { 3262 case OP_PCL_IPSEC_AES_CTR: 3263 decap_pdb.ctr.ctr_initial = 0x00000001; 3264 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3265 break; 3266 case OP_PCL_IPSEC_AES_GCM8: 3267 case OP_PCL_IPSEC_AES_GCM12: 3268 case OP_PCL_IPSEC_AES_GCM16: 3269 memcpy(decap_pdb.gcm.salt, 3270 (uint8_t *)&(ipsec_xform->salt), 4); 3271 break; 3272 } 3273 3274 if (ipsec_xform->tunnel.type == 3275 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3276 decap_pdb.options = sizeof(struct ip) << 16; 3277 if (ipsec_xform->options.copy_df) 3278 decap_pdb.options |= PDBHMO_ESP_DFV; 3279 if (ipsec_xform->options.dec_ttl) 3280 decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL; 3281 } else { 3282 decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; 3283 } 3284 if (ipsec_xform->options.esn) { 3285 decap_pdb.options |= PDBOPTS_ESP_ESN; 3286 decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3287 decap_pdb.seq_num = conf->ipsec.esn.low; 3288 } 3289 if (ipsec_xform->options.copy_dscp) 3290 decap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3291 if (ipsec_xform->options.ecn) 3292 decap_pdb.options |= PDBOPTS_ESP_TECN; 3293 3294 if (ipsec_xform->replay_win_sz) { 3295 uint32_t win_sz; 3296 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3297 3298 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3299 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3300 win_sz = 128; 3301 } 3302 switch (win_sz) { 3303 case 1: 3304 case 2: 3305 case 4: 3306 case 8: 3307 case 16: 3308 case 32: 3309 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3310 break; 3311 case 64: 3312 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3313 break; 3314 case 256: 3315 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3316 break; 3317 case 512: 3318 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3319 break; 3320 case 1024: 3321 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3322 break; 3323 case 128: 3324 default: 3325 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3326 } 3327 } 3328 session->dir = DIR_DEC; 3329 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3330 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3331 SHR_WAIT : SHR_SERIAL, 3332 &decap_pdb, &cipherdata, &authdata); 3333 } else 3334 goto out; 3335 3336 if (bufsize < 0) { 3337 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3338 goto out; 3339 } 3340 3341 flc->word1_sdl = (uint8_t)bufsize; 3342 3343 /* Enable the stashing control bit */ 3344 DPAA2_SET_FLC_RSC(flc); 3345 flc->word2_rflc_31_0 = lower_32_bits( 3346 (size_t)&(((struct dpaa2_sec_qp *) 3347 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3348 flc->word3_rflc_63_32 = upper_32_bits( 3349 (size_t)&(((struct dpaa2_sec_qp *) 3350 dev->data->queue_pairs[0])->rx_vq)); 3351 3352 /* Set EWS bit i.e. enable write-safe */ 3353 DPAA2_SET_FLC_EWS(flc); 3354 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3355 DPAA2_SET_FLC_REUSE_BS(flc); 3356 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3357 DPAA2_SET_FLC_REUSE_FF(flc); 3358 3359 session->ctxt = priv; 3360 3361 return 0; 3362 out: 3363 rte_free(session->auth_key.data); 3364 rte_free(session->cipher_key.data); 3365 rte_free(priv); 3366 return ret; 3367 } 3368 3369 static int 3370 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3371 struct rte_security_session_conf *conf, 3372 void *sess) 3373 { 3374 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3375 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3376 struct rte_crypto_auth_xform *auth_xform = NULL; 3377 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3378 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3379 struct ctxt_priv *priv; 3380 struct alginfo authdata, cipherdata; 3381 struct alginfo *p_authdata = NULL; 3382 int bufsize = -1; 3383 struct sec_flow_context *flc; 3384 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3385 int swap = true; 3386 #else 3387 int swap = false; 3388 #endif 3389 3390 PMD_INIT_FUNC_TRACE(); 3391 3392 memset(session, 0, sizeof(dpaa2_sec_session)); 3393 3394 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3395 sizeof(struct ctxt_priv) + 3396 sizeof(struct sec_flc_desc), 3397 RTE_CACHE_LINE_SIZE); 3398 3399 if (priv == NULL) { 3400 DPAA2_SEC_ERR("No memory for priv CTXT"); 3401 return -ENOMEM; 3402 } 3403 3404 flc = &priv->flc_desc[0].flc; 3405 3406 /* find xfrm types */ 3407 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3408 cipher_xform = &xform->cipher; 3409 if (xform->next != NULL && 3410 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3411 session->ext_params.aead_ctxt.auth_cipher_text = true; 3412 auth_xform = &xform->next->auth; 3413 } 3414 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3415 auth_xform = &xform->auth; 3416 if (xform->next != NULL && 3417 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3418 session->ext_params.aead_ctxt.auth_cipher_text = false; 3419 cipher_xform = &xform->next->cipher; 3420 } 3421 } else { 3422 DPAA2_SEC_ERR("Invalid crypto type"); 3423 return -EINVAL; 3424 } 3425 3426 session->ctxt_type = DPAA2_SEC_PDCP; 3427 if (cipher_xform) { 3428 session->cipher_key.data = rte_zmalloc(NULL, 3429 cipher_xform->key.length, 3430 RTE_CACHE_LINE_SIZE); 3431 if (session->cipher_key.data == NULL && 3432 cipher_xform->key.length > 0) { 3433 DPAA2_SEC_ERR("No Memory for cipher key"); 3434 rte_free(priv); 3435 return -ENOMEM; 3436 } 3437 session->cipher_key.length = cipher_xform->key.length; 3438 memcpy(session->cipher_key.data, cipher_xform->key.data, 3439 cipher_xform->key.length); 3440 session->dir = 3441 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3442 DIR_ENC : DIR_DEC; 3443 session->cipher_alg = cipher_xform->algo; 3444 } else { 3445 session->cipher_key.data = NULL; 3446 session->cipher_key.length = 0; 3447 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3448 session->dir = DIR_ENC; 3449 } 3450 3451 session->pdcp.domain = pdcp_xform->domain; 3452 session->pdcp.bearer = pdcp_xform->bearer; 3453 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3454 session->pdcp.sn_size = pdcp_xform->sn_size; 3455 session->pdcp.hfn = pdcp_xform->hfn; 3456 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3457 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3458 /* hfv ovd offset location is stored in iv.offset value*/ 3459 if (cipher_xform) 3460 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3461 3462 cipherdata.key = (size_t)session->cipher_key.data; 3463 cipherdata.keylen = session->cipher_key.length; 3464 cipherdata.key_enc_flags = 0; 3465 cipherdata.key_type = RTA_DATA_IMM; 3466 3467 switch (session->cipher_alg) { 3468 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3469 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3470 break; 3471 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3472 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3473 break; 3474 case RTE_CRYPTO_CIPHER_AES_CTR: 3475 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3476 break; 3477 case RTE_CRYPTO_CIPHER_NULL: 3478 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3479 break; 3480 default: 3481 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3482 session->cipher_alg); 3483 goto out; 3484 } 3485 3486 if (auth_xform) { 3487 session->auth_key.data = rte_zmalloc(NULL, 3488 auth_xform->key.length, 3489 RTE_CACHE_LINE_SIZE); 3490 if (!session->auth_key.data && 3491 auth_xform->key.length > 0) { 3492 DPAA2_SEC_ERR("No Memory for auth key"); 3493 rte_free(session->cipher_key.data); 3494 rte_free(priv); 3495 return -ENOMEM; 3496 } 3497 session->auth_key.length = auth_xform->key.length; 3498 memcpy(session->auth_key.data, auth_xform->key.data, 3499 auth_xform->key.length); 3500 session->auth_alg = auth_xform->algo; 3501 } else { 3502 session->auth_key.data = NULL; 3503 session->auth_key.length = 0; 3504 session->auth_alg = 0; 3505 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3506 } 3507 authdata.key = (size_t)session->auth_key.data; 3508 authdata.keylen = session->auth_key.length; 3509 authdata.key_enc_flags = 0; 3510 authdata.key_type = RTA_DATA_IMM; 3511 3512 if (session->auth_alg) { 3513 switch (session->auth_alg) { 3514 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3515 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3516 break; 3517 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3518 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3519 break; 3520 case RTE_CRYPTO_AUTH_AES_CMAC: 3521 authdata.algtype = PDCP_AUTH_TYPE_AES; 3522 break; 3523 case RTE_CRYPTO_AUTH_NULL: 3524 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3525 break; 3526 default: 3527 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3528 session->auth_alg); 3529 goto out; 3530 } 3531 p_authdata = &authdata; 3532 } else { 3533 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3534 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); 3535 goto out; 3536 } 3537 session->auth_key.data = NULL; 3538 session->auth_key.length = 0; 3539 session->auth_alg = 0; 3540 } 3541 authdata.key = (size_t)session->auth_key.data; 3542 authdata.keylen = session->auth_key.length; 3543 authdata.key_enc_flags = 0; 3544 authdata.key_type = RTA_DATA_IMM; 3545 3546 if (pdcp_xform->sdap_enabled) { 3547 int nb_keys_to_inline = 3548 rta_inline_pdcp_sdap_query(authdata.algtype, 3549 cipherdata.algtype, 3550 session->pdcp.sn_size, 3551 session->pdcp.hfn_ovd); 3552 if (nb_keys_to_inline >= 1) { 3553 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3554 cipherdata.key_type = RTA_DATA_PTR; 3555 } 3556 if (nb_keys_to_inline >= 2) { 3557 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 3558 authdata.key_type = RTA_DATA_PTR; 3559 } 3560 } else { 3561 if (rta_inline_pdcp_query(authdata.algtype, 3562 cipherdata.algtype, 3563 session->pdcp.sn_size, 3564 session->pdcp.hfn_ovd)) { 3565 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3566 cipherdata.key_type = RTA_DATA_PTR; 3567 } 3568 } 3569 3570 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3571 if (session->dir == DIR_ENC) 3572 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3573 priv->flc_desc[0].desc, 1, swap, 3574 pdcp_xform->hfn, 3575 session->pdcp.sn_size, 3576 pdcp_xform->bearer, 3577 pdcp_xform->pkt_dir, 3578 pdcp_xform->hfn_threshold, 3579 &cipherdata, &authdata); 3580 else if (session->dir == DIR_DEC) 3581 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3582 priv->flc_desc[0].desc, 1, swap, 3583 pdcp_xform->hfn, 3584 session->pdcp.sn_size, 3585 pdcp_xform->bearer, 3586 pdcp_xform->pkt_dir, 3587 pdcp_xform->hfn_threshold, 3588 &cipherdata, &authdata); 3589 3590 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 3591 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc, 3592 1, swap, &authdata); 3593 } else { 3594 if (session->dir == DIR_ENC) { 3595 if (pdcp_xform->sdap_enabled) 3596 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap( 3597 priv->flc_desc[0].desc, 1, swap, 3598 session->pdcp.sn_size, 3599 pdcp_xform->hfn, 3600 pdcp_xform->bearer, 3601 pdcp_xform->pkt_dir, 3602 pdcp_xform->hfn_threshold, 3603 &cipherdata, p_authdata); 3604 else 3605 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3606 priv->flc_desc[0].desc, 1, swap, 3607 session->pdcp.sn_size, 3608 pdcp_xform->hfn, 3609 pdcp_xform->bearer, 3610 pdcp_xform->pkt_dir, 3611 pdcp_xform->hfn_threshold, 3612 &cipherdata, p_authdata); 3613 } else if (session->dir == DIR_DEC) { 3614 if (pdcp_xform->sdap_enabled) 3615 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap( 3616 priv->flc_desc[0].desc, 1, swap, 3617 session->pdcp.sn_size, 3618 pdcp_xform->hfn, 3619 pdcp_xform->bearer, 3620 pdcp_xform->pkt_dir, 3621 pdcp_xform->hfn_threshold, 3622 &cipherdata, p_authdata); 3623 else 3624 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3625 priv->flc_desc[0].desc, 1, swap, 3626 session->pdcp.sn_size, 3627 pdcp_xform->hfn, 3628 pdcp_xform->bearer, 3629 pdcp_xform->pkt_dir, 3630 pdcp_xform->hfn_threshold, 3631 &cipherdata, p_authdata); 3632 } 3633 } 3634 3635 if (bufsize < 0) { 3636 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3637 goto out; 3638 } 3639 3640 /* Enable the stashing control bit */ 3641 DPAA2_SET_FLC_RSC(flc); 3642 flc->word2_rflc_31_0 = lower_32_bits( 3643 (size_t)&(((struct dpaa2_sec_qp *) 3644 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3645 flc->word3_rflc_63_32 = upper_32_bits( 3646 (size_t)&(((struct dpaa2_sec_qp *) 3647 dev->data->queue_pairs[0])->rx_vq)); 3648 3649 flc->word1_sdl = (uint8_t)bufsize; 3650 3651 /* TODO - check the perf impact or 3652 * align as per descriptor type 3653 * Set EWS bit i.e. enable write-safe 3654 * DPAA2_SET_FLC_EWS(flc); 3655 */ 3656 3657 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3658 DPAA2_SET_FLC_REUSE_BS(flc); 3659 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3660 DPAA2_SET_FLC_REUSE_FF(flc); 3661 3662 session->ctxt = priv; 3663 3664 return 0; 3665 out: 3666 rte_free(session->auth_key.data); 3667 rte_free(session->cipher_key.data); 3668 rte_free(priv); 3669 return -EINVAL; 3670 } 3671 3672 static int 3673 dpaa2_sec_security_session_create(void *dev, 3674 struct rte_security_session_conf *conf, 3675 struct rte_security_session *sess) 3676 { 3677 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3678 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3679 int ret; 3680 3681 switch (conf->protocol) { 3682 case RTE_SECURITY_PROTOCOL_IPSEC: 3683 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3684 sess_private_data); 3685 break; 3686 case RTE_SECURITY_PROTOCOL_MACSEC: 3687 return -ENOTSUP; 3688 case RTE_SECURITY_PROTOCOL_PDCP: 3689 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3690 sess_private_data); 3691 break; 3692 default: 3693 return -EINVAL; 3694 } 3695 if (ret != 0) { 3696 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3697 return ret; 3698 } 3699 3700 return ret; 3701 } 3702 3703 /** Clear the memory of session so it doesn't leave key material behind */ 3704 static int 3705 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3706 struct rte_security_session *sess) 3707 { 3708 PMD_INIT_FUNC_TRACE(); 3709 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 3710 3711 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3712 3713 if (sess_priv) { 3714 rte_free(s->ctxt); 3715 rte_free(s->cipher_key.data); 3716 rte_free(s->auth_key.data); 3717 memset(s, 0, sizeof(dpaa2_sec_session)); 3718 } 3719 return 0; 3720 } 3721 3722 static int 3723 dpaa2_sec_security_session_update(void *dev, 3724 struct rte_security_session *sess, 3725 struct rte_security_session_conf *conf) 3726 { 3727 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3728 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3729 int ret; 3730 3731 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC && 3732 conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 3733 return -ENOTSUP; 3734 3735 dpaa2_sec_security_session_destroy(dev, sess); 3736 3737 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3738 sess_private_data); 3739 if (ret != 0) { 3740 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3741 return ret; 3742 } 3743 3744 return ret; 3745 } 3746 3747 static unsigned int 3748 dpaa2_sec_security_session_get_size(void *device __rte_unused) 3749 { 3750 return sizeof(dpaa2_sec_session); 3751 } 3752 3753 static int 3754 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused, 3755 struct rte_crypto_sym_xform *xform, 3756 struct rte_cryptodev_sym_session *sess) 3757 { 3758 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3759 int ret; 3760 3761 ret = dpaa2_sec_set_session_parameters(xform, sess_private_data); 3762 if (ret != 0) { 3763 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3764 /* Return session to mempool */ 3765 return ret; 3766 } 3767 3768 return 0; 3769 } 3770 3771 /** Clear the memory of session so it doesn't leave key material behind */ 3772 static void 3773 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused, 3774 struct rte_cryptodev_sym_session *sess) 3775 { 3776 PMD_INIT_FUNC_TRACE(); 3777 dpaa2_sec_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3778 3779 if (s) { 3780 rte_free(s->ctxt); 3781 rte_free(s->cipher_key.data); 3782 rte_free(s->auth_key.data); 3783 } 3784 } 3785 3786 static int 3787 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3788 struct rte_cryptodev_config *config __rte_unused) 3789 { 3790 PMD_INIT_FUNC_TRACE(); 3791 3792 return 0; 3793 } 3794 3795 static int 3796 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3797 { 3798 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3799 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3800 struct dpseci_attr attr; 3801 struct dpaa2_queue *dpaa2_q; 3802 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3803 dev->data->queue_pairs; 3804 struct dpseci_rx_queue_attr rx_attr; 3805 struct dpseci_tx_queue_attr tx_attr; 3806 int ret, i; 3807 3808 PMD_INIT_FUNC_TRACE(); 3809 3810 /* Change the tx burst function if ordered queues are used */ 3811 if (priv->en_ordered) 3812 dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered; 3813 3814 memset(&attr, 0, sizeof(struct dpseci_attr)); 3815 3816 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3817 if (ret) { 3818 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3819 priv->hw_id); 3820 goto get_attr_failure; 3821 } 3822 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3823 if (ret) { 3824 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3825 goto get_attr_failure; 3826 } 3827 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3828 dpaa2_q = &qp[i]->rx_vq; 3829 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3830 &rx_attr); 3831 dpaa2_q->fqid = rx_attr.fqid; 3832 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3833 } 3834 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3835 dpaa2_q = &qp[i]->tx_vq; 3836 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3837 &tx_attr); 3838 dpaa2_q->fqid = tx_attr.fqid; 3839 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3840 } 3841 3842 return 0; 3843 get_attr_failure: 3844 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3845 return -1; 3846 } 3847 3848 static void 3849 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3850 { 3851 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3852 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3853 int ret; 3854 3855 PMD_INIT_FUNC_TRACE(); 3856 3857 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3858 if (ret) { 3859 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3860 priv->hw_id); 3861 return; 3862 } 3863 3864 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3865 if (ret < 0) { 3866 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3867 return; 3868 } 3869 } 3870 3871 static int 3872 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused) 3873 { 3874 PMD_INIT_FUNC_TRACE(); 3875 3876 return 0; 3877 } 3878 3879 static void 3880 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3881 struct rte_cryptodev_info *info) 3882 { 3883 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3884 3885 PMD_INIT_FUNC_TRACE(); 3886 if (info != NULL) { 3887 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3888 info->feature_flags = dev->feature_flags; 3889 info->capabilities = dpaa2_sec_capabilities; 3890 /* No limit of number of sessions */ 3891 info->sym.max_nb_sessions = 0; 3892 info->driver_id = cryptodev_driver_id; 3893 } 3894 } 3895 3896 static 3897 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3898 struct rte_cryptodev_stats *stats) 3899 { 3900 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3901 struct fsl_mc_io dpseci; 3902 struct dpseci_sec_counters counters = {0}; 3903 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3904 dev->data->queue_pairs; 3905 int ret, i; 3906 3907 PMD_INIT_FUNC_TRACE(); 3908 if (stats == NULL) { 3909 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3910 return; 3911 } 3912 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3913 if (qp == NULL || qp[i] == NULL) { 3914 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3915 continue; 3916 } 3917 3918 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3919 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3920 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3921 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3922 } 3923 3924 /* In case as secondary process access stats, MCP portal in priv-hw 3925 * may have primary process address. Need the secondary process 3926 * based MCP portal address for this object. 3927 */ 3928 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3929 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3930 &counters); 3931 if (ret) { 3932 DPAA2_SEC_ERR("SEC counters failed"); 3933 } else { 3934 DPAA2_SEC_INFO("dpseci hardware stats:" 3935 "\n\tNum of Requests Dequeued = %" PRIu64 3936 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3937 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3938 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3939 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3940 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3941 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3942 counters.dequeued_requests, 3943 counters.ob_enc_requests, 3944 counters.ib_dec_requests, 3945 counters.ob_enc_bytes, 3946 counters.ob_prot_bytes, 3947 counters.ib_dec_bytes, 3948 counters.ib_valid_bytes); 3949 } 3950 } 3951 3952 static 3953 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3954 { 3955 int i; 3956 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3957 (dev->data->queue_pairs); 3958 3959 PMD_INIT_FUNC_TRACE(); 3960 3961 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3962 if (qp[i] == NULL) { 3963 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3964 continue; 3965 } 3966 qp[i]->tx_vq.rx_pkts = 0; 3967 qp[i]->tx_vq.tx_pkts = 0; 3968 qp[i]->tx_vq.err_pkts = 0; 3969 qp[i]->rx_vq.rx_pkts = 0; 3970 qp[i]->rx_vq.tx_pkts = 0; 3971 qp[i]->rx_vq.err_pkts = 0; 3972 } 3973 } 3974 3975 static void __rte_hot 3976 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3977 const struct qbman_fd *fd, 3978 const struct qbman_result *dq, 3979 struct dpaa2_queue *rxq, 3980 struct rte_event *ev) 3981 { 3982 struct dpaa2_sec_qp *qp; 3983 /* Prefetching mbuf */ 3984 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3985 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3986 3987 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3988 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3989 3990 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 3991 ev->flow_id = rxq->ev.flow_id; 3992 ev->sub_event_type = rxq->ev.sub_event_type; 3993 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3994 ev->op = RTE_EVENT_OP_NEW; 3995 ev->sched_type = rxq->ev.sched_type; 3996 ev->queue_id = rxq->ev.queue_id; 3997 ev->priority = rxq->ev.priority; 3998 ev->event_ptr = sec_fd_to_mbuf(fd, qp); 3999 4000 qbman_swp_dqrr_consume(swp, dq); 4001 } 4002 static void 4003 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 4004 const struct qbman_fd *fd, 4005 const struct qbman_result *dq, 4006 struct dpaa2_queue *rxq, 4007 struct rte_event *ev) 4008 { 4009 uint8_t dqrr_index; 4010 struct dpaa2_sec_qp *qp; 4011 struct rte_crypto_op *crypto_op; 4012 /* Prefetching mbuf */ 4013 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 4014 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 4015 4016 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 4017 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 4018 4019 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4020 ev->flow_id = rxq->ev.flow_id; 4021 ev->sub_event_type = rxq->ev.sub_event_type; 4022 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4023 ev->op = RTE_EVENT_OP_NEW; 4024 ev->sched_type = rxq->ev.sched_type; 4025 ev->queue_id = rxq->ev.queue_id; 4026 ev->priority = rxq->ev.priority; 4027 4028 crypto_op = sec_fd_to_mbuf(fd, qp); 4029 dqrr_index = qbman_get_dqrr_idx(dq); 4030 *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 4031 DPAA2_PER_LCORE_DQRR_SIZE++; 4032 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 4033 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 4034 ev->event_ptr = crypto_op; 4035 } 4036 4037 static void __rte_hot 4038 dpaa2_sec_process_ordered_event(struct qbman_swp *swp, 4039 const struct qbman_fd *fd, 4040 const struct qbman_result *dq, 4041 struct dpaa2_queue *rxq, 4042 struct rte_event *ev) 4043 { 4044 struct rte_crypto_op *crypto_op; 4045 struct dpaa2_sec_qp *qp; 4046 4047 /* Prefetching mbuf */ 4048 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 4049 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 4050 4051 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 4052 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 4053 4054 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4055 ev->flow_id = rxq->ev.flow_id; 4056 ev->sub_event_type = rxq->ev.sub_event_type; 4057 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4058 ev->op = RTE_EVENT_OP_NEW; 4059 ev->sched_type = rxq->ev.sched_type; 4060 ev->queue_id = rxq->ev.queue_id; 4061 ev->priority = rxq->ev.priority; 4062 crypto_op = sec_fd_to_mbuf(fd, qp); 4063 4064 *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP; 4065 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) << 4066 DPAA2_EQCR_OPRID_SHIFT; 4067 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) << 4068 DPAA2_EQCR_SEQNUM_SHIFT; 4069 4070 qbman_swp_dqrr_consume(swp, dq); 4071 ev->event_ptr = crypto_op; 4072 } 4073 4074 int 4075 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 4076 int qp_id, 4077 struct dpaa2_dpcon_dev *dpcon, 4078 const struct rte_event *event) 4079 { 4080 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4081 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4082 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 4083 struct dpseci_rx_queue_cfg cfg; 4084 uint8_t priority; 4085 int ret; 4086 4087 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 4088 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 4089 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 4090 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 4091 else if (event->sched_type == RTE_SCHED_TYPE_ORDERED) 4092 qp->rx_vq.cb = dpaa2_sec_process_ordered_event; 4093 else 4094 return -EINVAL; 4095 4096 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 4097 (dpcon->num_priorities - 1); 4098 4099 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4100 cfg.options = DPSECI_QUEUE_OPT_DEST; 4101 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 4102 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 4103 cfg.dest_cfg.priority = priority; 4104 4105 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 4106 cfg.user_ctx = (size_t)(&qp->rx_vq); 4107 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 4108 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 4109 cfg.order_preservation_en = 1; 4110 } 4111 4112 if (event->sched_type == RTE_SCHED_TYPE_ORDERED) { 4113 struct opr_cfg ocfg; 4114 4115 /* Restoration window size = 256 frames */ 4116 ocfg.oprrws = 3; 4117 /* Restoration window size = 512 frames for LX2 */ 4118 if (dpaa2_svr_family == SVR_LX2160A) 4119 ocfg.oprrws = 4; 4120 /* Auto advance NESN window enabled */ 4121 ocfg.oa = 1; 4122 /* Late arrival window size disabled */ 4123 ocfg.olws = 0; 4124 /* ORL resource exhaustaion advance NESN disabled */ 4125 ocfg.oeane = 0; 4126 4127 if (priv->en_loose_ordered) 4128 ocfg.oloe = 1; 4129 else 4130 ocfg.oloe = 0; 4131 4132 ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token, 4133 qp_id, OPR_OPT_CREATE, &ocfg); 4134 if (ret) { 4135 DPAA2_SEC_ERR("Error setting opr: ret: %d", ret); 4136 return ret; 4137 } 4138 qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf; 4139 priv->en_ordered = 1; 4140 } 4141 4142 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4143 qp_id, &cfg); 4144 if (ret) { 4145 DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret); 4146 return ret; 4147 } 4148 4149 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 4150 4151 return 0; 4152 } 4153 4154 int 4155 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 4156 int qp_id) 4157 { 4158 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4159 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4160 struct dpseci_rx_queue_cfg cfg; 4161 int ret; 4162 4163 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4164 cfg.options = DPSECI_QUEUE_OPT_DEST; 4165 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 4166 4167 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4168 qp_id, &cfg); 4169 if (ret) 4170 DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret); 4171 4172 return ret; 4173 } 4174 4175 static struct rte_cryptodev_ops crypto_ops = { 4176 .dev_configure = dpaa2_sec_dev_configure, 4177 .dev_start = dpaa2_sec_dev_start, 4178 .dev_stop = dpaa2_sec_dev_stop, 4179 .dev_close = dpaa2_sec_dev_close, 4180 .dev_infos_get = dpaa2_sec_dev_infos_get, 4181 .stats_get = dpaa2_sec_stats_get, 4182 .stats_reset = dpaa2_sec_stats_reset, 4183 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 4184 .queue_pair_release = dpaa2_sec_queue_pair_release, 4185 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 4186 .sym_session_configure = dpaa2_sec_sym_session_configure, 4187 .sym_session_clear = dpaa2_sec_sym_session_clear, 4188 /* Raw data-path API related operations */ 4189 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size, 4190 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx, 4191 }; 4192 4193 static const struct rte_security_capability * 4194 dpaa2_sec_capabilities_get(void *device __rte_unused) 4195 { 4196 return dpaa2_sec_security_cap; 4197 } 4198 4199 static const struct rte_security_ops dpaa2_sec_security_ops = { 4200 .session_create = dpaa2_sec_security_session_create, 4201 .session_update = dpaa2_sec_security_session_update, 4202 .session_get_size = dpaa2_sec_security_session_get_size, 4203 .session_stats_get = NULL, 4204 .session_destroy = dpaa2_sec_security_session_destroy, 4205 .set_pkt_metadata = NULL, 4206 .capabilities_get = dpaa2_sec_capabilities_get 4207 }; 4208 4209 static int 4210 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 4211 { 4212 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4213 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4214 int ret; 4215 4216 PMD_INIT_FUNC_TRACE(); 4217 4218 /* Function is reverse of dpaa2_sec_dev_init. 4219 * It does the following: 4220 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 4221 * 2. Close the DPSECI device 4222 * 3. Free the allocated resources. 4223 */ 4224 4225 /*Close the device at underlying layer*/ 4226 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 4227 if (ret) { 4228 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 4229 return -1; 4230 } 4231 4232 /*Free the allocated memory for ethernet private data and dpseci*/ 4233 priv->hw = NULL; 4234 rte_free(dpseci); 4235 rte_free(dev->security_ctx); 4236 4237 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 4238 dev->data->name, rte_socket_id()); 4239 4240 return 0; 4241 } 4242 4243 static int 4244 check_devargs_handler(const char *key, const char *value, 4245 void *opaque) 4246 { 4247 struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque; 4248 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4249 4250 if (!strcmp(key, "drv_strict_order")) { 4251 priv->en_loose_ordered = false; 4252 } else if (!strcmp(key, "drv_dump_mode")) { 4253 dpaa2_sec_dp_dump = atoi(value); 4254 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) { 4255 DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not " 4256 "supported, changing to FULL error" 4257 " prints"); 4258 dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP; 4259 } 4260 } else 4261 return -1; 4262 4263 return 0; 4264 } 4265 4266 static void 4267 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key) 4268 { 4269 struct rte_kvargs *kvlist; 4270 struct rte_devargs *devargs; 4271 4272 devargs = cryptodev->device->devargs; 4273 if (!devargs) 4274 return; 4275 4276 kvlist = rte_kvargs_parse(devargs->args, NULL); 4277 if (!kvlist) 4278 return; 4279 4280 if (!rte_kvargs_count(kvlist, key)) { 4281 rte_kvargs_free(kvlist); 4282 return; 4283 } 4284 4285 rte_kvargs_process(kvlist, key, 4286 check_devargs_handler, (void *)cryptodev); 4287 rte_kvargs_free(kvlist); 4288 } 4289 4290 static int 4291 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 4292 { 4293 struct dpaa2_sec_dev_private *internals; 4294 struct rte_device *dev = cryptodev->device; 4295 struct rte_dpaa2_device *dpaa2_dev; 4296 struct rte_security_ctx *security_instance; 4297 struct fsl_mc_io *dpseci; 4298 uint16_t token; 4299 struct dpseci_attr attr; 4300 int retcode, hw_id; 4301 4302 PMD_INIT_FUNC_TRACE(); 4303 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 4304 hw_id = dpaa2_dev->object_id; 4305 4306 cryptodev->driver_id = cryptodev_driver_id; 4307 cryptodev->dev_ops = &crypto_ops; 4308 4309 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 4310 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 4311 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 4312 RTE_CRYPTODEV_FF_HW_ACCELERATED | 4313 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 4314 RTE_CRYPTODEV_FF_SECURITY | 4315 RTE_CRYPTODEV_FF_SYM_RAW_DP | 4316 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 4317 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 4318 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 4319 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 4320 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 4321 4322 internals = cryptodev->data->dev_private; 4323 4324 /* 4325 * For secondary processes, we don't initialise any further as primary 4326 * has already done this work. Only check we don't need a different 4327 * RX function 4328 */ 4329 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 4330 DPAA2_SEC_DEBUG("Device already init by primary process"); 4331 return 0; 4332 } 4333 4334 /* Initialize security_ctx only for primary process*/ 4335 security_instance = rte_malloc("rte_security_instances_ops", 4336 sizeof(struct rte_security_ctx), 0); 4337 if (security_instance == NULL) 4338 return -ENOMEM; 4339 security_instance->device = (void *)cryptodev; 4340 security_instance->ops = &dpaa2_sec_security_ops; 4341 security_instance->sess_cnt = 0; 4342 cryptodev->security_ctx = security_instance; 4343 4344 /*Open the rte device via MC and save the handle for further use*/ 4345 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 4346 sizeof(struct fsl_mc_io), 0); 4347 if (!dpseci) { 4348 DPAA2_SEC_ERR( 4349 "Error in allocating the memory for dpsec object"); 4350 return -ENOMEM; 4351 } 4352 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 4353 4354 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 4355 if (retcode != 0) { 4356 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 4357 retcode); 4358 goto init_error; 4359 } 4360 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 4361 if (retcode != 0) { 4362 DPAA2_SEC_ERR( 4363 "Cannot get dpsec device attributed: Error = %x", 4364 retcode); 4365 goto init_error; 4366 } 4367 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 4368 "dpsec-%u", hw_id); 4369 4370 internals->max_nb_queue_pairs = attr.num_tx_queues; 4371 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 4372 internals->hw = dpseci; 4373 internals->token = token; 4374 internals->en_loose_ordered = true; 4375 4376 dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE); 4377 dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER); 4378 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 4379 return 0; 4380 4381 init_error: 4382 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 4383 4384 /* dpaa2_sec_uninit(crypto_dev_name); */ 4385 return -EFAULT; 4386 } 4387 4388 static int 4389 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 4390 struct rte_dpaa2_device *dpaa2_dev) 4391 { 4392 struct rte_cryptodev *cryptodev; 4393 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 4394 4395 int retval; 4396 4397 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 4398 dpaa2_dev->object_id); 4399 4400 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 4401 if (cryptodev == NULL) 4402 return -ENOMEM; 4403 4404 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4405 cryptodev->data->dev_private = rte_zmalloc_socket( 4406 "cryptodev private structure", 4407 sizeof(struct dpaa2_sec_dev_private), 4408 RTE_CACHE_LINE_SIZE, 4409 rte_socket_id()); 4410 4411 if (cryptodev->data->dev_private == NULL) 4412 rte_panic("Cannot allocate memzone for private " 4413 "device data"); 4414 } 4415 4416 dpaa2_dev->cryptodev = cryptodev; 4417 cryptodev->device = &dpaa2_dev->device; 4418 4419 /* init user callbacks */ 4420 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 4421 4422 if (dpaa2_svr_family == SVR_LX2160A) 4423 rta_set_sec_era(RTA_SEC_ERA_10); 4424 else 4425 rta_set_sec_era(RTA_SEC_ERA_8); 4426 4427 DPAA2_SEC_INFO("2-SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era())); 4428 4429 /* Invoke PMD device initialization function */ 4430 retval = dpaa2_sec_dev_init(cryptodev); 4431 if (retval == 0) { 4432 rte_cryptodev_pmd_probing_finish(cryptodev); 4433 return 0; 4434 } 4435 4436 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4437 rte_free(cryptodev->data->dev_private); 4438 4439 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 4440 4441 return -ENXIO; 4442 } 4443 4444 static int 4445 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 4446 { 4447 struct rte_cryptodev *cryptodev; 4448 int ret; 4449 4450 cryptodev = dpaa2_dev->cryptodev; 4451 if (cryptodev == NULL) 4452 return -ENODEV; 4453 4454 ret = dpaa2_sec_uninit(cryptodev); 4455 if (ret) 4456 return ret; 4457 4458 return rte_cryptodev_pmd_destroy(cryptodev); 4459 } 4460 4461 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 4462 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 4463 .drv_type = DPAA2_CRYPTO, 4464 .driver = { 4465 .name = "DPAA2 SEC PMD" 4466 }, 4467 .probe = cryptodev_dpaa2_sec_probe, 4468 .remove = cryptodev_dpaa2_sec_remove, 4469 }; 4470 4471 static struct cryptodev_driver dpaa2_sec_crypto_drv; 4472 4473 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 4474 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 4475 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 4476 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD, 4477 DRIVER_STRICT_ORDER "=<int>" 4478 DRIVER_DUMP_MODE "=<int>"); 4479 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE); 4480