1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2023 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_udp.h> 14 #include <rte_mbuf.h> 15 #include <rte_cryptodev.h> 16 #include <rte_malloc.h> 17 #include <rte_memcpy.h> 18 #include <rte_string_fns.h> 19 #include <rte_cycles.h> 20 #include <rte_kvargs.h> 21 #include <dev_driver.h> 22 #include <cryptodev_pmd.h> 23 #include <rte_common.h> 24 #include <bus_fslmc_driver.h> 25 #include <fslmc_vfio.h> 26 #include <dpaa2_hw_pvt.h> 27 #include <dpaa2_hw_dpio.h> 28 #include <dpaa2_hw_mempool.h> 29 #include <fsl_dpopr.h> 30 #include <fsl_dpseci.h> 31 #include <fsl_mc_sys.h> 32 #include <rte_hexdump.h> 33 34 #include "dpaa2_sec_priv.h" 35 #include "dpaa2_sec_event.h" 36 #include "dpaa2_sec_logs.h" 37 38 /* RTA header files */ 39 #include <desc/ipsec.h> 40 #include <desc/pdcp.h> 41 #include <desc/sdap.h> 42 #include <desc/algo.h> 43 44 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 45 * a pointer to the shared descriptor 46 */ 47 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 48 #define FSL_VENDOR_ID 0x1957 49 #define FSL_DEVICE_ID 0x410 50 #define FSL_SUBSYSTEM_SEC 1 51 #define FSL_MC_DPSECI_DEVID 3 52 53 #define NO_PREFETCH 0 54 55 #define DRIVER_DUMP_MODE "drv_dump_mode" 56 #define DRIVER_STRICT_ORDER "drv_strict_order" 57 58 /* DPAA2_SEC_DP_DUMP levels */ 59 enum dpaa2_sec_dump_levels { 60 DPAA2_SEC_DP_NO_DUMP, 61 DPAA2_SEC_DP_ERR_DUMP, 62 DPAA2_SEC_DP_FULL_DUMP 63 }; 64 65 uint8_t cryptodev_driver_id; 66 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP; 67 68 static inline void 69 free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) 70 { 71 struct qbman_fle *fle; 72 struct rte_crypto_op *op; 73 74 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 75 return; 76 77 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 78 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 79 /* free the fle memory */ 80 if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src))) 81 rte_mempool_put(qp->fle_pool, (void *)(fle-1)); 82 else 83 rte_free((void *)(fle-1)); 84 } 85 86 static inline int 87 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 88 struct rte_crypto_op *op, 89 struct qbman_fd *fd, uint16_t bpid) 90 { 91 struct rte_crypto_sym_op *sym_op = op->sym; 92 struct ctxt_priv *priv = sess->ctxt; 93 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 94 struct sec_flow_context *flc; 95 struct rte_mbuf *mbuf; 96 uint32_t in_len = 0, out_len = 0; 97 98 if (sym_op->m_dst) 99 mbuf = sym_op->m_dst; 100 else 101 mbuf = sym_op->m_src; 102 103 /* first FLE entry used to store mbuf and session ctxt */ 104 fle = (struct qbman_fle *)rte_malloc(NULL, 105 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 106 RTE_CACHE_LINE_SIZE); 107 if (unlikely(!fle)) { 108 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 109 return -ENOMEM; 110 } 111 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 112 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 113 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 114 115 /* Save the shared descriptor */ 116 flc = &priv->flc_desc[0].flc; 117 118 op_fle = fle + 1; 119 ip_fle = fle + 2; 120 sge = fle + 3; 121 122 if (likely(bpid < MAX_BPID)) { 123 DPAA2_SET_FD_BPID(fd, bpid); 124 DPAA2_SET_FLE_BPID(op_fle, bpid); 125 DPAA2_SET_FLE_BPID(ip_fle, bpid); 126 } else { 127 DPAA2_SET_FD_IVP(fd); 128 DPAA2_SET_FLE_IVP(op_fle); 129 DPAA2_SET_FLE_IVP(ip_fle); 130 } 131 132 /* Configure FD as a FRAME LIST */ 133 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 134 DPAA2_SET_FD_COMPOUND_FMT(fd); 135 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 136 137 /* Configure Output FLE with Scatter/Gather Entry */ 138 DPAA2_SET_FLE_SG_EXT(op_fle); 139 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 140 141 /* Configure Output SGE for Encap/Decap */ 142 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 143 /* o/p segs */ 144 while (mbuf->next) { 145 sge->length = mbuf->data_len; 146 out_len += sge->length; 147 sge++; 148 mbuf = mbuf->next; 149 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 150 } 151 /* using buf_len for last buf - so that extra data can be added */ 152 sge->length = mbuf->buf_len - mbuf->data_off; 153 out_len += sge->length; 154 155 DPAA2_SET_FLE_FIN(sge); 156 op_fle->length = out_len; 157 158 sge++; 159 mbuf = sym_op->m_src; 160 161 /* Configure Input FLE with Scatter/Gather Entry */ 162 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 163 DPAA2_SET_FLE_SG_EXT(ip_fle); 164 DPAA2_SET_FLE_FIN(ip_fle); 165 166 /* Configure input SGE for Encap/Decap */ 167 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 168 sge->length = mbuf->data_len; 169 in_len += sge->length; 170 171 mbuf = mbuf->next; 172 /* i/p segs */ 173 while (mbuf) { 174 sge++; 175 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 176 sge->length = mbuf->data_len; 177 in_len += sge->length; 178 mbuf = mbuf->next; 179 } 180 ip_fle->length = in_len; 181 DPAA2_SET_FLE_FIN(sge); 182 183 /* In case of PDCP, per packet HFN is stored in 184 * mbuf priv after sym_op. 185 */ 186 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 187 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 188 sess->pdcp.hfn_ovd_offset); 189 /* enable HFN override */ 190 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 191 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 192 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 193 } 194 DPAA2_SET_FD_LEN(fd, ip_fle->length); 195 196 return 0; 197 } 198 199 static inline int 200 build_proto_compound_fd(dpaa2_sec_session *sess, 201 struct rte_crypto_op *op, 202 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 203 { 204 struct rte_crypto_sym_op *sym_op = op->sym; 205 struct ctxt_priv *priv = sess->ctxt; 206 struct qbman_fle *fle, *ip_fle, *op_fle; 207 struct sec_flow_context *flc; 208 struct rte_mbuf *src_mbuf = sym_op->m_src; 209 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 210 int retval; 211 212 if (!dst_mbuf) 213 dst_mbuf = src_mbuf; 214 215 /* Save the shared descriptor */ 216 flc = &priv->flc_desc[0].flc; 217 218 /* we are using the first FLE entry to store Mbuf */ 219 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 220 if (retval) { 221 DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed"); 222 return -ENOMEM; 223 } 224 memset(fle, 0, FLE_POOL_BUF_SIZE); 225 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 226 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 227 228 op_fle = fle + 1; 229 ip_fle = fle + 2; 230 231 if (likely(bpid < MAX_BPID)) { 232 DPAA2_SET_FD_BPID(fd, bpid); 233 DPAA2_SET_FLE_BPID(op_fle, bpid); 234 DPAA2_SET_FLE_BPID(ip_fle, bpid); 235 } else { 236 DPAA2_SET_FD_IVP(fd); 237 DPAA2_SET_FLE_IVP(op_fle); 238 DPAA2_SET_FLE_IVP(ip_fle); 239 } 240 241 /* Configure FD as a FRAME LIST */ 242 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 243 DPAA2_SET_FD_COMPOUND_FMT(fd); 244 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 245 246 /* Configure Output FLE with dst mbuf data */ 247 DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf)); 248 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 249 250 /* Configure Input FLE with src mbuf data */ 251 DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf)); 252 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 253 254 DPAA2_SET_FD_LEN(fd, ip_fle->length); 255 DPAA2_SET_FLE_FIN(ip_fle); 256 257 /* In case of PDCP, per packet HFN is stored in 258 * mbuf priv after sym_op. 259 */ 260 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 261 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 262 sess->pdcp.hfn_ovd_offset); 263 /* enable HFN override */ 264 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 265 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 266 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 267 } 268 269 return 0; 270 271 } 272 273 static inline int 274 build_proto_fd(dpaa2_sec_session *sess, 275 struct rte_crypto_op *op, 276 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 277 { 278 struct rte_crypto_sym_op *sym_op = op->sym; 279 if (sym_op->m_dst) 280 return build_proto_compound_fd(sess, op, fd, bpid, qp); 281 282 struct ctxt_priv *priv = sess->ctxt; 283 struct sec_flow_context *flc; 284 struct rte_mbuf *mbuf = sym_op->m_src; 285 286 if (likely(bpid < MAX_BPID)) 287 DPAA2_SET_FD_BPID(fd, bpid); 288 else 289 DPAA2_SET_FD_IVP(fd); 290 291 /* Save the shared descriptor */ 292 flc = &priv->flc_desc[0].flc; 293 294 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 295 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 296 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 297 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 298 299 /* save physical address of mbuf */ 300 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 301 mbuf->buf_iova = (size_t)op; 302 303 return 0; 304 } 305 306 static inline int 307 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 308 struct rte_crypto_op *op, 309 struct qbman_fd *fd, __rte_unused uint16_t bpid) 310 { 311 struct rte_crypto_sym_op *sym_op = op->sym; 312 struct ctxt_priv *priv = sess->ctxt; 313 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 314 struct sec_flow_context *flc; 315 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 316 int icv_len = sess->digest_length; 317 uint8_t *old_icv; 318 struct rte_mbuf *mbuf; 319 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 320 sess->iv.offset); 321 322 if (sym_op->m_dst) 323 mbuf = sym_op->m_dst; 324 else 325 mbuf = sym_op->m_src; 326 327 /* first FLE entry used to store mbuf and session ctxt */ 328 fle = (struct qbman_fle *)rte_malloc(NULL, 329 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 330 RTE_CACHE_LINE_SIZE); 331 if (unlikely(!fle)) { 332 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 333 return -ENOMEM; 334 } 335 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 336 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 337 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 338 339 op_fle = fle + 1; 340 ip_fle = fle + 2; 341 sge = fle + 3; 342 343 /* Save the shared descriptor */ 344 flc = &priv->flc_desc[0].flc; 345 346 /* Configure FD as a FRAME LIST */ 347 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 348 DPAA2_SET_FD_COMPOUND_FMT(fd); 349 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 350 351 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d", 352 sym_op->aead.data.offset, 353 sym_op->aead.data.length, 354 sess->digest_length); 355 DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x", 356 sess->iv.length, 357 sym_op->m_src->data_off); 358 359 /* Configure Output FLE with Scatter/Gather Entry */ 360 DPAA2_SET_FLE_SG_EXT(op_fle); 361 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 362 363 if (auth_only_len) 364 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 365 366 op_fle->length = (sess->dir == DIR_ENC) ? 367 (sym_op->aead.data.length + icv_len) : 368 sym_op->aead.data.length; 369 370 /* Configure Output SGE for Encap/Decap */ 371 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); 372 sge->length = mbuf->data_len - sym_op->aead.data.offset; 373 374 mbuf = mbuf->next; 375 /* o/p segs */ 376 while (mbuf) { 377 sge++; 378 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 379 sge->length = mbuf->data_len; 380 mbuf = mbuf->next; 381 } 382 sge->length -= icv_len; 383 384 if (sess->dir == DIR_ENC) { 385 sge++; 386 DPAA2_SET_FLE_ADDR(sge, 387 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 388 sge->length = icv_len; 389 } 390 DPAA2_SET_FLE_FIN(sge); 391 392 sge++; 393 mbuf = sym_op->m_src; 394 395 /* Configure Input FLE with Scatter/Gather Entry */ 396 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 397 DPAA2_SET_FLE_SG_EXT(ip_fle); 398 DPAA2_SET_FLE_FIN(ip_fle); 399 ip_fle->length = (sess->dir == DIR_ENC) ? 400 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 401 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 402 icv_len); 403 404 /* Configure Input SGE for Encap/Decap */ 405 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 406 sge->length = sess->iv.length; 407 408 sge++; 409 if (auth_only_len) { 410 DPAA2_SET_FLE_ADDR(sge, 411 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 412 sge->length = auth_only_len; 413 sge++; 414 } 415 416 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset); 417 sge->length = mbuf->data_len - sym_op->aead.data.offset; 418 419 mbuf = mbuf->next; 420 /* i/p segs */ 421 while (mbuf) { 422 sge++; 423 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 424 sge->length = mbuf->data_len; 425 mbuf = mbuf->next; 426 } 427 428 if (sess->dir == DIR_DEC) { 429 sge++; 430 old_icv = (uint8_t *)(sge + 1); 431 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 432 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 433 sge->length = icv_len; 434 } 435 436 DPAA2_SET_FLE_FIN(sge); 437 if (auth_only_len) { 438 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 439 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 440 } 441 DPAA2_SET_FD_LEN(fd, ip_fle->length); 442 443 return 0; 444 } 445 446 static inline int 447 build_authenc_gcm_fd(dpaa2_sec_session *sess, 448 struct rte_crypto_op *op, 449 struct qbman_fd *fd, uint16_t bpid, 450 struct dpaa2_sec_qp *qp) 451 { 452 struct rte_crypto_sym_op *sym_op = op->sym; 453 struct ctxt_priv *priv = sess->ctxt; 454 struct qbman_fle *fle, *sge; 455 struct sec_flow_context *flc; 456 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 457 int icv_len = sess->digest_length, retval; 458 uint8_t *old_icv; 459 struct rte_mbuf *dst; 460 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 461 sess->iv.offset); 462 463 if (sym_op->m_dst) 464 dst = sym_op->m_dst; 465 else 466 dst = sym_op->m_src; 467 468 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 469 * Currently we donot know which FLE has the mbuf stored. 470 * So while retreiving we can go back 1 FLE from the FD -ADDR 471 * to get the MBUF Addr from the previous FLE. 472 * We can have a better approach to use the inline Mbuf 473 */ 474 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 475 if (retval) { 476 DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool"); 477 return -ENOMEM; 478 } 479 memset(fle, 0, FLE_POOL_BUF_SIZE); 480 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 481 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 482 fle = fle + 1; 483 sge = fle + 2; 484 if (likely(bpid < MAX_BPID)) { 485 DPAA2_SET_FD_BPID(fd, bpid); 486 DPAA2_SET_FLE_BPID(fle, bpid); 487 DPAA2_SET_FLE_BPID(fle + 1, bpid); 488 DPAA2_SET_FLE_BPID(sge, bpid); 489 DPAA2_SET_FLE_BPID(sge + 1, bpid); 490 DPAA2_SET_FLE_BPID(sge + 2, bpid); 491 DPAA2_SET_FLE_BPID(sge + 3, bpid); 492 } else { 493 DPAA2_SET_FD_IVP(fd); 494 DPAA2_SET_FLE_IVP(fle); 495 DPAA2_SET_FLE_IVP((fle + 1)); 496 DPAA2_SET_FLE_IVP(sge); 497 DPAA2_SET_FLE_IVP((sge + 1)); 498 DPAA2_SET_FLE_IVP((sge + 2)); 499 DPAA2_SET_FLE_IVP((sge + 3)); 500 } 501 502 /* Save the shared descriptor */ 503 flc = &priv->flc_desc[0].flc; 504 /* Configure FD as a FRAME LIST */ 505 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 506 DPAA2_SET_FD_COMPOUND_FMT(fd); 507 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 508 509 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d", 510 sym_op->aead.data.offset, 511 sym_op->aead.data.length, 512 sess->digest_length); 513 DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x", 514 sess->iv.length, 515 sym_op->m_src->data_off); 516 517 /* Configure Output FLE with Scatter/Gather Entry */ 518 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 519 if (auth_only_len) 520 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 521 fle->length = (sess->dir == DIR_ENC) ? 522 (sym_op->aead.data.length + icv_len) : 523 sym_op->aead.data.length; 524 525 DPAA2_SET_FLE_SG_EXT(fle); 526 527 /* Configure Output SGE for Encap/Decap */ 528 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset); 529 sge->length = sym_op->aead.data.length; 530 531 if (sess->dir == DIR_ENC) { 532 sge++; 533 DPAA2_SET_FLE_ADDR(sge, 534 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 535 sge->length = sess->digest_length; 536 } 537 DPAA2_SET_FLE_FIN(sge); 538 539 sge++; 540 fle++; 541 542 /* Configure Input FLE with Scatter/Gather Entry */ 543 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 544 DPAA2_SET_FLE_SG_EXT(fle); 545 DPAA2_SET_FLE_FIN(fle); 546 fle->length = (sess->dir == DIR_ENC) ? 547 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 548 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 549 sess->digest_length); 550 551 /* Configure Input SGE for Encap/Decap */ 552 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 553 sge->length = sess->iv.length; 554 sge++; 555 if (auth_only_len) { 556 DPAA2_SET_FLE_ADDR(sge, 557 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 558 sge->length = auth_only_len; 559 DPAA2_SET_FLE_BPID(sge, bpid); 560 sge++; 561 } 562 563 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset); 564 sge->length = sym_op->aead.data.length; 565 if (sess->dir == DIR_DEC) { 566 sge++; 567 old_icv = (uint8_t *)(sge + 1); 568 memcpy(old_icv, sym_op->aead.digest.data, 569 sess->digest_length); 570 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 571 sge->length = sess->digest_length; 572 } 573 DPAA2_SET_FLE_FIN(sge); 574 575 if (auth_only_len) { 576 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 577 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 578 } 579 580 DPAA2_SET_FD_LEN(fd, fle->length); 581 return 0; 582 } 583 584 static inline int 585 build_authenc_sg_fd(dpaa2_sec_session *sess, 586 struct rte_crypto_op *op, 587 struct qbman_fd *fd, __rte_unused uint16_t bpid) 588 { 589 struct rte_crypto_sym_op *sym_op = op->sym; 590 struct ctxt_priv *priv = sess->ctxt; 591 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 592 struct sec_flow_context *flc; 593 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 594 sym_op->auth.data.offset; 595 uint16_t auth_tail_len = sym_op->auth.data.length - 596 sym_op->cipher.data.length - auth_hdr_len; 597 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 598 int icv_len = sess->digest_length; 599 uint8_t *old_icv; 600 struct rte_mbuf *mbuf; 601 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 602 sess->iv.offset); 603 604 if (sym_op->m_dst) 605 mbuf = sym_op->m_dst; 606 else 607 mbuf = sym_op->m_src; 608 609 /* first FLE entry used to store mbuf and session ctxt */ 610 fle = (struct qbman_fle *)rte_malloc(NULL, 611 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 612 RTE_CACHE_LINE_SIZE); 613 if (unlikely(!fle)) { 614 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 615 return -ENOMEM; 616 } 617 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 618 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 619 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 620 621 op_fle = fle + 1; 622 ip_fle = fle + 2; 623 sge = fle + 3; 624 625 /* Save the shared descriptor */ 626 flc = &priv->flc_desc[0].flc; 627 628 /* Configure FD as a FRAME LIST */ 629 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 630 DPAA2_SET_FD_COMPOUND_FMT(fd); 631 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 632 633 DPAA2_SEC_DP_DEBUG( 634 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d", 635 sym_op->auth.data.offset, 636 sym_op->auth.data.length, 637 sess->digest_length); 638 DPAA2_SEC_DP_DEBUG( 639 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x", 640 sym_op->cipher.data.offset, 641 sym_op->cipher.data.length, 642 sess->iv.length, 643 sym_op->m_src->data_off); 644 645 /* Configure Output FLE with Scatter/Gather Entry */ 646 DPAA2_SET_FLE_SG_EXT(op_fle); 647 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 648 649 if (auth_only_len) 650 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 651 652 op_fle->length = (sess->dir == DIR_ENC) ? 653 (sym_op->cipher.data.length + icv_len) : 654 sym_op->cipher.data.length; 655 656 /* Configure Output SGE for Encap/Decap */ 657 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); 658 sge->length = mbuf->data_len - sym_op->auth.data.offset; 659 660 mbuf = mbuf->next; 661 /* o/p segs */ 662 while (mbuf) { 663 sge++; 664 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 665 sge->length = mbuf->data_len; 666 mbuf = mbuf->next; 667 } 668 sge->length -= icv_len; 669 670 if (sess->dir == DIR_ENC) { 671 sge++; 672 DPAA2_SET_FLE_ADDR(sge, 673 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 674 sge->length = icv_len; 675 } 676 DPAA2_SET_FLE_FIN(sge); 677 678 sge++; 679 mbuf = sym_op->m_src; 680 681 /* Configure Input FLE with Scatter/Gather Entry */ 682 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 683 DPAA2_SET_FLE_SG_EXT(ip_fle); 684 DPAA2_SET_FLE_FIN(ip_fle); 685 ip_fle->length = (sess->dir == DIR_ENC) ? 686 (sym_op->auth.data.length + sess->iv.length) : 687 (sym_op->auth.data.length + sess->iv.length + 688 icv_len); 689 690 /* Configure Input SGE for Encap/Decap */ 691 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 692 sge->length = sess->iv.length; 693 694 sge++; 695 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset); 696 sge->length = mbuf->data_len - sym_op->auth.data.offset; 697 698 mbuf = mbuf->next; 699 /* i/p segs */ 700 while (mbuf) { 701 sge++; 702 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 703 sge->length = mbuf->data_len; 704 mbuf = mbuf->next; 705 } 706 sge->length -= icv_len; 707 708 if (sess->dir == DIR_DEC) { 709 sge++; 710 old_icv = (uint8_t *)(sge + 1); 711 memcpy(old_icv, sym_op->auth.digest.data, 712 icv_len); 713 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 714 sge->length = icv_len; 715 } 716 717 DPAA2_SET_FLE_FIN(sge); 718 if (auth_only_len) { 719 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 720 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 721 } 722 DPAA2_SET_FD_LEN(fd, ip_fle->length); 723 724 return 0; 725 } 726 727 static inline int 728 build_authenc_fd(dpaa2_sec_session *sess, 729 struct rte_crypto_op *op, 730 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 731 { 732 struct rte_crypto_sym_op *sym_op = op->sym; 733 struct ctxt_priv *priv = sess->ctxt; 734 struct qbman_fle *fle, *sge; 735 struct sec_flow_context *flc; 736 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 737 sym_op->auth.data.offset; 738 uint16_t auth_tail_len = sym_op->auth.data.length - 739 sym_op->cipher.data.length - auth_hdr_len; 740 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 741 742 int icv_len = sess->digest_length, retval; 743 uint8_t *old_icv; 744 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 745 sess->iv.offset); 746 struct rte_mbuf *dst; 747 748 if (sym_op->m_dst) 749 dst = sym_op->m_dst; 750 else 751 dst = sym_op->m_src; 752 753 /* we are using the first FLE entry to store Mbuf. 754 * Currently we donot know which FLE has the mbuf stored. 755 * So while retreiving we can go back 1 FLE from the FD -ADDR 756 * to get the MBUF Addr from the previous FLE. 757 * We can have a better approach to use the inline Mbuf 758 */ 759 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 760 if (retval) { 761 DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool"); 762 return -ENOMEM; 763 } 764 memset(fle, 0, FLE_POOL_BUF_SIZE); 765 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 766 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 767 fle = fle + 1; 768 sge = fle + 2; 769 if (likely(bpid < MAX_BPID)) { 770 DPAA2_SET_FD_BPID(fd, bpid); 771 DPAA2_SET_FLE_BPID(fle, bpid); 772 DPAA2_SET_FLE_BPID(fle + 1, bpid); 773 DPAA2_SET_FLE_BPID(sge, bpid); 774 DPAA2_SET_FLE_BPID(sge + 1, bpid); 775 DPAA2_SET_FLE_BPID(sge + 2, bpid); 776 DPAA2_SET_FLE_BPID(sge + 3, bpid); 777 } else { 778 DPAA2_SET_FD_IVP(fd); 779 DPAA2_SET_FLE_IVP(fle); 780 DPAA2_SET_FLE_IVP((fle + 1)); 781 DPAA2_SET_FLE_IVP(sge); 782 DPAA2_SET_FLE_IVP((sge + 1)); 783 DPAA2_SET_FLE_IVP((sge + 2)); 784 DPAA2_SET_FLE_IVP((sge + 3)); 785 } 786 787 /* Save the shared descriptor */ 788 flc = &priv->flc_desc[0].flc; 789 /* Configure FD as a FRAME LIST */ 790 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 791 DPAA2_SET_FD_COMPOUND_FMT(fd); 792 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 793 794 DPAA2_SEC_DP_DEBUG( 795 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d", 796 sym_op->auth.data.offset, 797 sym_op->auth.data.length, 798 sess->digest_length); 799 DPAA2_SEC_DP_DEBUG( 800 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x", 801 sym_op->cipher.data.offset, 802 sym_op->cipher.data.length, 803 sess->iv.length, 804 sym_op->m_src->data_off); 805 806 /* Configure Output FLE with Scatter/Gather Entry */ 807 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 808 if (auth_only_len) 809 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 810 fle->length = (sess->dir == DIR_ENC) ? 811 (sym_op->cipher.data.length + icv_len) : 812 sym_op->cipher.data.length; 813 814 DPAA2_SET_FLE_SG_EXT(fle); 815 816 /* Configure Output SGE for Encap/Decap */ 817 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset); 818 sge->length = sym_op->cipher.data.length; 819 820 if (sess->dir == DIR_ENC) { 821 sge++; 822 DPAA2_SET_FLE_ADDR(sge, 823 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 824 sge->length = sess->digest_length; 825 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 826 sess->iv.length)); 827 } 828 DPAA2_SET_FLE_FIN(sge); 829 830 sge++; 831 fle++; 832 833 /* Configure Input FLE with Scatter/Gather Entry */ 834 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 835 DPAA2_SET_FLE_SG_EXT(fle); 836 DPAA2_SET_FLE_FIN(fle); 837 fle->length = (sess->dir == DIR_ENC) ? 838 (sym_op->auth.data.length + sess->iv.length) : 839 (sym_op->auth.data.length + sess->iv.length + 840 sess->digest_length); 841 842 /* Configure Input SGE for Encap/Decap */ 843 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 844 sge->length = sess->iv.length; 845 sge++; 846 847 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset); 848 sge->length = sym_op->auth.data.length; 849 if (sess->dir == DIR_DEC) { 850 sge++; 851 old_icv = (uint8_t *)(sge + 1); 852 memcpy(old_icv, sym_op->auth.digest.data, 853 sess->digest_length); 854 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 855 sge->length = sess->digest_length; 856 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 857 sess->digest_length + 858 sess->iv.length)); 859 } 860 DPAA2_SET_FLE_FIN(sge); 861 if (auth_only_len) { 862 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 863 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 864 } 865 return 0; 866 } 867 868 static inline int build_auth_sg_fd( 869 dpaa2_sec_session *sess, 870 struct rte_crypto_op *op, 871 struct qbman_fd *fd, 872 __rte_unused uint16_t bpid) 873 { 874 struct rte_crypto_sym_op *sym_op = op->sym; 875 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 876 struct sec_flow_context *flc; 877 struct ctxt_priv *priv = sess->ctxt; 878 int data_len, data_offset; 879 uint8_t *old_digest; 880 struct rte_mbuf *mbuf; 881 882 data_len = sym_op->auth.data.length; 883 data_offset = sym_op->auth.data.offset; 884 885 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 886 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 887 if ((data_len & 7) || (data_offset & 7)) { 888 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 889 return -ENOTSUP; 890 } 891 892 data_len = data_len >> 3; 893 data_offset = data_offset >> 3; 894 } 895 896 mbuf = sym_op->m_src; 897 fle = (struct qbman_fle *)rte_malloc(NULL, 898 FLE_SG_MEM_SIZE(mbuf->nb_segs), 899 RTE_CACHE_LINE_SIZE); 900 if (unlikely(!fle)) { 901 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 902 return -ENOMEM; 903 } 904 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 905 /* first FLE entry used to store mbuf and session ctxt */ 906 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 907 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 908 op_fle = fle + 1; 909 ip_fle = fle + 2; 910 sge = fle + 3; 911 912 flc = &priv->flc_desc[DESC_INITFINAL].flc; 913 /* sg FD */ 914 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 915 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 916 DPAA2_SET_FD_COMPOUND_FMT(fd); 917 918 /* o/p fle */ 919 DPAA2_SET_FLE_ADDR(op_fle, 920 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 921 op_fle->length = sess->digest_length; 922 923 /* i/p fle */ 924 DPAA2_SET_FLE_SG_EXT(ip_fle); 925 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 926 ip_fle->length = data_len; 927 928 if (sess->iv.length) { 929 uint8_t *iv_ptr; 930 931 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 932 sess->iv.offset); 933 934 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 935 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 936 sge->length = 12; 937 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 938 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 939 sge->length = 8; 940 } else { 941 sge->length = sess->iv.length; 942 } 943 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 944 ip_fle->length += sge->length; 945 sge++; 946 } 947 /* i/p 1st seg */ 948 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 949 950 if (data_len <= (mbuf->data_len - data_offset)) { 951 sge->length = data_len; 952 data_len = 0; 953 } else { 954 sge->length = mbuf->data_len - data_offset; 955 956 /* remaining i/p segs */ 957 while ((data_len = data_len - sge->length) && 958 (mbuf = mbuf->next)) { 959 sge++; 960 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 961 if (data_len > mbuf->data_len) 962 sge->length = mbuf->data_len; 963 else 964 sge->length = data_len; 965 } 966 } 967 968 if (sess->dir == DIR_DEC) { 969 /* Digest verification case */ 970 sge++; 971 old_digest = (uint8_t *)(sge + 1); 972 rte_memcpy(old_digest, sym_op->auth.digest.data, 973 sess->digest_length); 974 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 975 sge->length = sess->digest_length; 976 ip_fle->length += sess->digest_length; 977 } 978 DPAA2_SET_FLE_FIN(sge); 979 DPAA2_SET_FLE_FIN(ip_fle); 980 DPAA2_SET_FD_LEN(fd, ip_fle->length); 981 982 return 0; 983 } 984 985 static inline int 986 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 987 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 988 { 989 struct rte_crypto_sym_op *sym_op = op->sym; 990 struct qbman_fle *fle, *sge; 991 struct sec_flow_context *flc; 992 struct ctxt_priv *priv = sess->ctxt; 993 int data_len, data_offset; 994 uint8_t *old_digest; 995 int retval; 996 997 data_len = sym_op->auth.data.length; 998 data_offset = sym_op->auth.data.offset; 999 1000 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 1001 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1002 if ((data_len & 7) || (data_offset & 7)) { 1003 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 1004 return -ENOTSUP; 1005 } 1006 1007 data_len = data_len >> 3; 1008 data_offset = data_offset >> 3; 1009 } 1010 1011 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 1012 if (retval) { 1013 DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool"); 1014 return -ENOMEM; 1015 } 1016 memset(fle, 0, FLE_POOL_BUF_SIZE); 1017 /* TODO we are using the first FLE entry to store Mbuf. 1018 * Currently we donot know which FLE has the mbuf stored. 1019 * So while retreiving we can go back 1 FLE from the FD -ADDR 1020 * to get the MBUF Addr from the previous FLE. 1021 * We can have a better approach to use the inline Mbuf 1022 */ 1023 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1024 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1025 fle = fle + 1; 1026 sge = fle + 2; 1027 1028 if (likely(bpid < MAX_BPID)) { 1029 DPAA2_SET_FD_BPID(fd, bpid); 1030 DPAA2_SET_FLE_BPID(fle, bpid); 1031 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1032 DPAA2_SET_FLE_BPID(sge, bpid); 1033 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1034 } else { 1035 DPAA2_SET_FD_IVP(fd); 1036 DPAA2_SET_FLE_IVP(fle); 1037 DPAA2_SET_FLE_IVP((fle + 1)); 1038 DPAA2_SET_FLE_IVP(sge); 1039 DPAA2_SET_FLE_IVP((sge + 1)); 1040 } 1041 1042 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1043 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1044 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1045 DPAA2_SET_FD_COMPOUND_FMT(fd); 1046 1047 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1048 fle->length = sess->digest_length; 1049 fle++; 1050 1051 /* Setting input FLE */ 1052 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1053 DPAA2_SET_FLE_SG_EXT(fle); 1054 fle->length = data_len; 1055 1056 if (sess->iv.length) { 1057 uint8_t *iv_ptr; 1058 1059 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1060 sess->iv.offset); 1061 1062 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1063 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1064 sge->length = 12; 1065 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1066 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1067 sge->length = 8; 1068 } else { 1069 sge->length = sess->iv.length; 1070 } 1071 1072 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1073 fle->length = fle->length + sge->length; 1074 sge++; 1075 } 1076 1077 /* Setting data to authenticate */ 1078 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); 1079 sge->length = data_len; 1080 1081 if (sess->dir == DIR_DEC) { 1082 sge++; 1083 old_digest = (uint8_t *)(sge + 1); 1084 rte_memcpy(old_digest, sym_op->auth.digest.data, 1085 sess->digest_length); 1086 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1087 sge->length = sess->digest_length; 1088 fle->length = fle->length + sess->digest_length; 1089 } 1090 1091 DPAA2_SET_FLE_FIN(sge); 1092 DPAA2_SET_FLE_FIN(fle); 1093 DPAA2_SET_FD_LEN(fd, fle->length); 1094 1095 return 0; 1096 } 1097 1098 static int 1099 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1100 struct qbman_fd *fd, uint16_t bpid) 1101 { 1102 struct rte_crypto_sym_op *sym_op = op->sym; 1103 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1104 int data_len, data_offset; 1105 struct sec_flow_context *flc; 1106 struct ctxt_priv *priv = sess->ctxt; 1107 struct rte_mbuf *mbuf; 1108 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1109 sess->iv.offset); 1110 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1111 char debug_str[1024]; 1112 int offset; 1113 #endif 1114 1115 data_len = sym_op->cipher.data.length; 1116 data_offset = sym_op->cipher.data.offset; 1117 1118 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1119 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1120 if ((data_len & 7) || (data_offset & 7)) { 1121 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1122 return -ENOTSUP; 1123 } 1124 1125 data_len = data_len >> 3; 1126 data_offset = data_offset >> 3; 1127 } 1128 1129 if (sym_op->m_dst) 1130 mbuf = sym_op->m_dst; 1131 else 1132 mbuf = sym_op->m_src; 1133 1134 /* first FLE entry used to store mbuf and session ctxt */ 1135 fle = (struct qbman_fle *)rte_malloc(NULL, 1136 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1137 RTE_CACHE_LINE_SIZE); 1138 if (!fle) { 1139 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1140 return -ENOMEM; 1141 } 1142 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1143 /* first FLE entry used to store mbuf and session ctxt */ 1144 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1145 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1146 1147 op_fle = fle + 1; 1148 ip_fle = fle + 2; 1149 sge = fle + 3; 1150 1151 flc = &priv->flc_desc[0].flc; 1152 1153 DPAA2_SEC_DP_DEBUG( 1154 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1155 " data_off: 0x%x", 1156 data_offset, 1157 data_len, 1158 sess->iv.length, 1159 sym_op->m_src->data_off); 1160 1161 /* o/p fle */ 1162 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1163 op_fle->length = data_len; 1164 DPAA2_SET_FLE_SG_EXT(op_fle); 1165 1166 /* o/p 1st seg */ 1167 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 1168 sge->length = mbuf->data_len - data_offset; 1169 1170 mbuf = mbuf->next; 1171 /* o/p segs */ 1172 while (mbuf) { 1173 sge++; 1174 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 1175 sge->length = mbuf->data_len; 1176 mbuf = mbuf->next; 1177 } 1178 DPAA2_SET_FLE_FIN(sge); 1179 1180 DPAA2_SEC_DP_DEBUG( 1181 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d", 1182 flc, fle, fle->addr_hi, fle->addr_lo, 1183 fle->length); 1184 1185 /* i/p fle */ 1186 mbuf = sym_op->m_src; 1187 sge++; 1188 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1189 ip_fle->length = sess->iv.length + data_len; 1190 DPAA2_SET_FLE_SG_EXT(ip_fle); 1191 1192 /* i/p IV */ 1193 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1194 sge->length = sess->iv.length; 1195 1196 sge++; 1197 1198 /* i/p 1st seg */ 1199 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset); 1200 sge->length = mbuf->data_len - data_offset; 1201 1202 mbuf = mbuf->next; 1203 /* i/p segs */ 1204 while (mbuf) { 1205 sge++; 1206 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf)); 1207 sge->length = mbuf->data_len; 1208 mbuf = mbuf->next; 1209 } 1210 DPAA2_SET_FLE_FIN(sge); 1211 DPAA2_SET_FLE_FIN(ip_fle); 1212 1213 /* sg fd */ 1214 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1215 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1216 DPAA2_SET_FD_COMPOUND_FMT(fd); 1217 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1218 1219 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1220 offset = sprintf(debug_str, 1221 "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ", 1222 DPAA2_GET_FD_ADDR(fd), 1223 bpid < MAX_BPID ? "SW" : "BMAN"); 1224 if (bpid < MAX_BPID) { 1225 offset += sprintf(&debug_str[offset], 1226 "bpid = %d ", bpid); 1227 } 1228 offset += sprintf(&debug_str[offset], 1229 "private size = %d ", 1230 mbuf->pool->private_data_size); 1231 offset += sprintf(&debug_str[offset], 1232 "off =%d, len =%d", 1233 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); 1234 DPAA2_SEC_DP_DEBUG("%s", debug_str); 1235 #else 1236 RTE_SET_USED(bpid); 1237 #endif 1238 1239 return 0; 1240 } 1241 1242 static int 1243 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1244 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1245 { 1246 struct rte_crypto_sym_op *sym_op = op->sym; 1247 struct qbman_fle *fle, *sge; 1248 int retval, data_len, data_offset; 1249 struct sec_flow_context *flc; 1250 struct ctxt_priv *priv = sess->ctxt; 1251 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1252 sess->iv.offset); 1253 struct rte_mbuf *dst; 1254 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1255 char debug_str[1024]; 1256 int offset; 1257 #endif 1258 1259 data_len = sym_op->cipher.data.length; 1260 data_offset = sym_op->cipher.data.offset; 1261 1262 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1263 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1264 if ((data_len & 7) || (data_offset & 7)) { 1265 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1266 return -ENOTSUP; 1267 } 1268 1269 data_len = data_len >> 3; 1270 data_offset = data_offset >> 3; 1271 } 1272 1273 if (sym_op->m_dst) 1274 dst = sym_op->m_dst; 1275 else 1276 dst = sym_op->m_src; 1277 1278 retval = rte_mempool_get(qp->fle_pool, (void **)(&fle)); 1279 if (retval) { 1280 DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool"); 1281 return -ENOMEM; 1282 } 1283 memset(fle, 0, FLE_POOL_BUF_SIZE); 1284 /* TODO we are using the first FLE entry to store Mbuf. 1285 * Currently we donot know which FLE has the mbuf stored. 1286 * So while retreiving we can go back 1 FLE from the FD -ADDR 1287 * to get the MBUF Addr from the previous FLE. 1288 * We can have a better approach to use the inline Mbuf 1289 */ 1290 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1291 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1292 fle = fle + 1; 1293 sge = fle + 2; 1294 1295 if (likely(bpid < MAX_BPID)) { 1296 DPAA2_SET_FD_BPID(fd, bpid); 1297 DPAA2_SET_FLE_BPID(fle, bpid); 1298 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1299 DPAA2_SET_FLE_BPID(sge, bpid); 1300 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1301 } else { 1302 DPAA2_SET_FD_IVP(fd); 1303 DPAA2_SET_FLE_IVP(fle); 1304 DPAA2_SET_FLE_IVP((fle + 1)); 1305 DPAA2_SET_FLE_IVP(sge); 1306 DPAA2_SET_FLE_IVP((sge + 1)); 1307 } 1308 1309 flc = &priv->flc_desc[0].flc; 1310 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1311 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1312 DPAA2_SET_FD_COMPOUND_FMT(fd); 1313 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1314 1315 DPAA2_SEC_DP_DEBUG( 1316 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1317 " data_off: 0x%x", 1318 data_offset, 1319 data_len, 1320 sess->iv.length, 1321 sym_op->m_src->data_off); 1322 1323 DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset); 1324 1325 fle->length = data_len + sess->iv.length; 1326 1327 DPAA2_SEC_DP_DEBUG( 1328 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", 1329 flc, fle, fle->addr_hi, fle->addr_lo, 1330 fle->length); 1331 1332 fle++; 1333 1334 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1335 fle->length = data_len + sess->iv.length; 1336 1337 DPAA2_SET_FLE_SG_EXT(fle); 1338 1339 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1340 sge->length = sess->iv.length; 1341 1342 sge++; 1343 DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset); 1344 1345 sge->length = data_len; 1346 DPAA2_SET_FLE_FIN(sge); 1347 DPAA2_SET_FLE_FIN(fle); 1348 1349 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1350 offset = sprintf(debug_str, 1351 "CIPHER: fdaddr =%" PRIx64 ", from %s pool ", 1352 DPAA2_GET_FD_ADDR(fd), 1353 bpid < MAX_BPID ? "SW" : "BMAN"); 1354 if (bpid < MAX_BPID) { 1355 offset += sprintf(&debug_str[offset], 1356 "bpid = %d ", bpid); 1357 } 1358 offset += sprintf(&debug_str[offset], 1359 "private size = %d ", 1360 dst->pool->private_data_size); 1361 offset += sprintf(&debug_str[offset], 1362 "off =%d, len =%d", 1363 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); 1364 DPAA2_SEC_DP_DEBUG("%s", debug_str); 1365 #endif 1366 1367 return 0; 1368 } 1369 1370 static inline int 1371 build_sec_fd(struct rte_crypto_op *op, 1372 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp) 1373 { 1374 int ret = -1; 1375 dpaa2_sec_session *sess; 1376 1377 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { 1378 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1379 } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1380 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1381 } else { 1382 DPAA2_SEC_DP_ERR("Session type invalid"); 1383 return -ENOTSUP; 1384 } 1385 1386 if (!sess) { 1387 DPAA2_SEC_DP_ERR("Session not available"); 1388 return -EINVAL; 1389 } 1390 1391 /* Any of the buffer is segmented*/ 1392 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1393 ((op->sym->m_dst != NULL) && 1394 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1395 switch (sess->ctxt_type) { 1396 case DPAA2_SEC_CIPHER: 1397 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1398 break; 1399 case DPAA2_SEC_AUTH: 1400 ret = build_auth_sg_fd(sess, op, fd, bpid); 1401 break; 1402 case DPAA2_SEC_AEAD: 1403 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1404 break; 1405 case DPAA2_SEC_CIPHER_HASH: 1406 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1407 break; 1408 case DPAA2_SEC_IPSEC: 1409 case DPAA2_SEC_PDCP: 1410 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1411 break; 1412 default: 1413 DPAA2_SEC_ERR("error: Unsupported session %d", 1414 sess->ctxt_type); 1415 ret = -ENOTSUP; 1416 } 1417 } else { 1418 switch (sess->ctxt_type) { 1419 case DPAA2_SEC_CIPHER: 1420 ret = build_cipher_fd(sess, op, fd, bpid, qp); 1421 break; 1422 case DPAA2_SEC_AUTH: 1423 ret = build_auth_fd(sess, op, fd, bpid, qp); 1424 break; 1425 case DPAA2_SEC_AEAD: 1426 ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp); 1427 break; 1428 case DPAA2_SEC_CIPHER_HASH: 1429 ret = build_authenc_fd(sess, op, fd, bpid, qp); 1430 break; 1431 case DPAA2_SEC_IPSEC: 1432 ret = build_proto_fd(sess, op, fd, bpid, qp); 1433 break; 1434 case DPAA2_SEC_PDCP: 1435 ret = build_proto_compound_fd(sess, op, fd, bpid, qp); 1436 break; 1437 default: 1438 DPAA2_SEC_ERR("error: Unsupported session%d", 1439 sess->ctxt_type); 1440 ret = -ENOTSUP; 1441 } 1442 } 1443 return ret; 1444 } 1445 1446 static uint16_t 1447 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1448 uint16_t nb_ops) 1449 { 1450 /* Function to transmit the frames to given device and VQ*/ 1451 uint32_t loop; 1452 int32_t ret; 1453 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1454 uint32_t frames_to_send, retry_count; 1455 struct qbman_eq_desc eqdesc; 1456 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1457 struct qbman_swp *swp; 1458 uint16_t num_tx = 0; 1459 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1460 /*todo - need to support multiple buffer pools */ 1461 uint16_t bpid; 1462 struct rte_mempool *mb_pool; 1463 1464 if (unlikely(nb_ops == 0)) 1465 return 0; 1466 1467 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1468 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1469 return 0; 1470 } 1471 /*Prepare enqueue descriptor*/ 1472 qbman_eq_desc_clear(&eqdesc); 1473 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1474 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1475 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1476 1477 if (!DPAA2_PER_LCORE_DPIO) { 1478 ret = dpaa2_affine_qbman_swp(); 1479 if (ret) { 1480 DPAA2_SEC_ERR( 1481 "Failed to allocate IO portal, tid: %d", 1482 rte_gettid()); 1483 return 0; 1484 } 1485 } 1486 swp = DPAA2_PER_LCORE_PORTAL; 1487 1488 while (nb_ops) { 1489 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1490 dpaa2_eqcr_size : nb_ops; 1491 1492 for (loop = 0; loop < frames_to_send; loop++) { 1493 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1494 if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) { 1495 DPAA2_PER_LCORE_DQRR_SIZE--; 1496 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << 1497 *dpaa2_seqn((*ops)->sym->m_src) & 1498 QBMAN_EQCR_DCA_IDXMASK); 1499 } 1500 flags[loop] = *dpaa2_seqn((*ops)->sym->m_src); 1501 *dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN; 1502 } 1503 1504 /*Clear the unused FD fields before sending*/ 1505 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1506 mb_pool = (*ops)->sym->m_src->pool; 1507 bpid = mempool_to_bpid(mb_pool); 1508 ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); 1509 if (ret) { 1510 DPAA2_SEC_DP_DEBUG("FD build failed"); 1511 goto skip_tx; 1512 } 1513 ops++; 1514 } 1515 1516 loop = 0; 1517 retry_count = 0; 1518 while (loop < frames_to_send) { 1519 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1520 &fd_arr[loop], 1521 &flags[loop], 1522 frames_to_send - loop); 1523 if (unlikely(ret < 0)) { 1524 retry_count++; 1525 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1526 num_tx += loop; 1527 nb_ops -= loop; 1528 DPAA2_SEC_DP_DEBUG("Enqueue fail"); 1529 /* freeing the fle buffers */ 1530 while (loop < frames_to_send) { 1531 free_fle(&fd_arr[loop], 1532 dpaa2_qp); 1533 loop++; 1534 } 1535 goto skip_tx; 1536 } 1537 } else { 1538 loop += ret; 1539 retry_count = 0; 1540 } 1541 } 1542 1543 num_tx += loop; 1544 nb_ops -= loop; 1545 } 1546 skip_tx: 1547 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1548 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1549 return num_tx; 1550 } 1551 1552 static inline struct rte_crypto_op * 1553 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1554 { 1555 struct rte_crypto_op *op; 1556 uint16_t len = DPAA2_GET_FD_LEN(fd); 1557 int16_t diff = 0; 1558 dpaa2_sec_session *sess_priv __rte_unused; 1559 1560 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1561 DPAA2_SEC_ERR("error: non inline buffer"); 1562 return NULL; 1563 } 1564 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1565 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1566 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1567 1568 diff = len - mbuf->pkt_len; 1569 mbuf->pkt_len += diff; 1570 mbuf->data_len += diff; 1571 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1572 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1573 op->sym->aead.digest.phys_addr = 0L; 1574 1575 sess_priv = SECURITY_GET_SESS_PRIV(op->sym->session); 1576 if (sess_priv->dir == DIR_ENC) 1577 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1578 else 1579 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1580 1581 if (unlikely(fd->simple.frc)) { 1582 DPAA2_SEC_ERR("SEC returned Error - %x", 1583 fd->simple.frc); 1584 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 1585 } else { 1586 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1587 } 1588 1589 return op; 1590 } 1591 1592 static inline struct rte_crypto_op * 1593 sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) 1594 { 1595 struct qbman_fle *fle; 1596 struct rte_crypto_op *op; 1597 struct rte_mbuf *dst, *src; 1598 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1599 char debug_str[1024]; 1600 int offset; 1601 #endif 1602 1603 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1604 return sec_simple_fd_to_mbuf(fd); 1605 1606 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1607 1608 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x", 1609 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1610 1611 /* we are using the first FLE entry to store Mbuf. 1612 * Currently we donot know which FLE has the mbuf stored. 1613 * So while retreiving we can go back 1 FLE from the FD -ADDR 1614 * to get the MBUF Addr from the previous FLE. 1615 * We can have a better approach to use the inline Mbuf 1616 */ 1617 1618 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1619 1620 /* Prefeth op */ 1621 src = op->sym->m_src; 1622 rte_prefetch0(src); 1623 1624 if (op->sym->m_dst) { 1625 dst = op->sym->m_dst; 1626 rte_prefetch0(dst); 1627 } else 1628 dst = src; 1629 1630 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1631 uint16_t len = DPAA2_GET_FD_LEN(fd); 1632 dst->pkt_len = len; 1633 while (dst->next != NULL) { 1634 len -= dst->data_len; 1635 dst = dst->next; 1636 } 1637 dst->data_len = len; 1638 } 1639 1640 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) 1641 offset = sprintf(debug_str, "Mbuf %p from %s pool ", 1642 dst, DPAA2_GET_FD_IVP(fd) ? "SW" : "BMAN"); 1643 if (!DPAA2_GET_FD_IVP(fd)) { 1644 offset += sprintf(&debug_str[offset], "bpid = %d ", 1645 DPAA2_GET_FD_BPID(fd)); 1646 } 1647 offset += sprintf(&debug_str[offset], 1648 "private size = %d ", dst->pool->private_data_size); 1649 offset += sprintf(&debug_str[offset], 1650 "addr %p, fdaddr =%" PRIx64 ", off =%d, len =%d", 1651 dst->buf_addr, DPAA2_GET_FD_ADDR(fd), 1652 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); 1653 DPAA2_SEC_DP_DEBUG("%s", debug_str); 1654 #endif 1655 1656 /* free the fle memory */ 1657 if (likely(rte_pktmbuf_is_contiguous(src))) { 1658 rte_mempool_put(qp->fle_pool, (void *)(fle-1)); 1659 } else 1660 rte_free((void *)(fle-1)); 1661 1662 return op; 1663 } 1664 1665 static void 1666 dpaa2_sec_dump(struct rte_crypto_op *op, FILE *f) 1667 { 1668 int i; 1669 dpaa2_sec_session *sess = NULL; 1670 struct ctxt_priv *priv; 1671 uint8_t bufsize; 1672 struct rte_crypto_sym_op *sym_op; 1673 1674 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1675 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session); 1676 #ifdef RTE_LIB_SECURITY 1677 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1678 sess = SECURITY_GET_SESS_PRIV(op->sym->session); 1679 #endif 1680 1681 if (sess == NULL) 1682 goto mbuf_dump; 1683 1684 priv = (struct ctxt_priv *)sess->ctxt; 1685 fprintf(f, "\n****************************************\n" 1686 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n" 1687 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n" 1688 "\tCipher key len:\t%zd\n", sess->ctxt_type, 1689 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC", 1690 sess->cipher_alg, sess->auth_alg, sess->aead_alg, 1691 sess->cipher_key.length); 1692 rte_hexdump(f, "cipher key", sess->cipher_key.data, 1693 sess->cipher_key.length); 1694 rte_hexdump(f, "auth key", sess->auth_key.data, 1695 sess->auth_key.length); 1696 fprintf(f, "\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n" 1697 "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only" 1698 " len:\t%d\n\taead cipher text:\t%d\n", 1699 sess->auth_key.length, sess->iv.length, sess->iv.offset, 1700 sess->digest_length, sess->status, 1701 sess->ext_params.aead_ctxt.auth_only_len, 1702 sess->ext_params.aead_ctxt.auth_cipher_text); 1703 #ifdef RTE_LIB_SECURITY 1704 fprintf(f, "PDCP session params:\n" 1705 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" 1706 "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" 1707 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain, 1708 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd, 1709 sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn, 1710 sess->pdcp.hfn_threshold); 1711 1712 #endif 1713 bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl; 1714 fprintf(f, "Descriptor Dump:\n"); 1715 for (i = 0; i < bufsize; i++) 1716 fprintf(f, "\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]); 1717 1718 fprintf(f, "\n"); 1719 mbuf_dump: 1720 sym_op = op->sym; 1721 if (sym_op->m_src) { 1722 fprintf(f, "Source mbuf:\n"); 1723 rte_pktmbuf_dump(f, sym_op->m_src, sym_op->m_src->data_len); 1724 } 1725 if (sym_op->m_dst) { 1726 fprintf(f, "Destination mbuf:\n"); 1727 rte_pktmbuf_dump(f, sym_op->m_dst, sym_op->m_dst->data_len); 1728 } 1729 1730 fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n" 1731 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n" 1732 , sym_op->session, 1733 sym_op->cipher.data.offset, sym_op->cipher.data.length, 1734 sym_op->auth.data.offset, sym_op->auth.data.length, 1735 sym_op->aead.data.offset, sym_op->aead.data.length); 1736 fprintf(f, "\n"); 1737 1738 } 1739 1740 static void 1741 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci, 1742 struct dpaa2_queue *dpaa2_q) 1743 { 1744 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 1745 struct rte_crypto_op *op; 1746 struct qbman_fd *fd; 1747 struct dpaa2_sec_qp *dpaa2_qp; 1748 1749 dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq); 1750 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); 1751 op = sec_fd_to_mbuf(fd, dpaa2_qp); 1752 /* Instead of freeing, enqueue it to the sec tx queue (sec->core) 1753 * after setting an error in FD. But this will have performance impact. 1754 */ 1755 rte_pktmbuf_free(op->sym->m_src); 1756 } 1757 1758 static void 1759 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, 1760 struct rte_mbuf *m, 1761 struct qbman_eq_desc *eqdesc) 1762 { 1763 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 1764 struct eqresp_metadata *eqresp_meta; 1765 struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private; 1766 uint16_t orpid, seqnum; 1767 uint8_t dq_idx; 1768 1769 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) { 1770 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >> 1771 DPAA2_EQCR_OPRID_SHIFT; 1772 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >> 1773 DPAA2_EQCR_SEQNUM_SHIFT; 1774 1775 1776 if (!priv->en_loose_ordered) { 1777 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0); 1778 qbman_eq_desc_set_response(eqdesc, (uint64_t) 1779 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[ 1780 dpio_dev->eqresp_pi]), 1); 1781 qbman_eq_desc_set_token(eqdesc, 1); 1782 1783 eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi]; 1784 eqresp_meta->dpaa2_q = dpaa2_q; 1785 eqresp_meta->mp = m->pool; 1786 1787 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ? 1788 dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0); 1789 } else { 1790 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); 1791 } 1792 } else { 1793 dq_idx = *dpaa2_seqn(m) - 1; 1794 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); 1795 DPAA2_PER_LCORE_DQRR_SIZE--; 1796 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); 1797 } 1798 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN; 1799 } 1800 1801 1802 static uint16_t 1803 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops, 1804 uint16_t nb_ops) 1805 { 1806 /* Function to transmit the frames to given device and VQ*/ 1807 uint32_t loop; 1808 int32_t ret; 1809 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1810 uint32_t frames_to_send, num_free_eq_desc, retry_count; 1811 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; 1812 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1813 struct qbman_swp *swp; 1814 uint16_t num_tx = 0; 1815 uint16_t bpid; 1816 struct rte_mempool *mb_pool; 1817 struct dpaa2_sec_dev_private *priv = 1818 dpaa2_qp->tx_vq.crypto_data->dev_private; 1819 1820 if (unlikely(nb_ops == 0)) 1821 return 0; 1822 1823 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1824 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1825 return 0; 1826 } 1827 1828 if (!DPAA2_PER_LCORE_DPIO) { 1829 ret = dpaa2_affine_qbman_swp(); 1830 if (ret) { 1831 DPAA2_SEC_ERR("Failure in affining portal"); 1832 return 0; 1833 } 1834 } 1835 swp = DPAA2_PER_LCORE_PORTAL; 1836 1837 while (nb_ops) { 1838 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1839 dpaa2_eqcr_size : nb_ops; 1840 1841 if (!priv->en_loose_ordered) { 1842 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1843 num_free_eq_desc = dpaa2_free_eq_descriptors(); 1844 if (num_free_eq_desc < frames_to_send) 1845 frames_to_send = num_free_eq_desc; 1846 } 1847 } 1848 1849 for (loop = 0; loop < frames_to_send; loop++) { 1850 /*Prepare enqueue descriptor*/ 1851 qbman_eq_desc_clear(&eqdesc[loop]); 1852 qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid); 1853 1854 if (*dpaa2_seqn((*ops)->sym->m_src)) 1855 dpaa2_sec_set_enqueue_descriptor( 1856 &dpaa2_qp->tx_vq, 1857 (*ops)->sym->m_src, 1858 &eqdesc[loop]); 1859 else 1860 qbman_eq_desc_set_no_orp(&eqdesc[loop], 1861 DPAA2_EQ_RESP_ERR_FQ); 1862 1863 /*Clear the unused FD fields before sending*/ 1864 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1865 mb_pool = (*ops)->sym->m_src->pool; 1866 bpid = mempool_to_bpid(mb_pool); 1867 ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); 1868 if (ret) { 1869 DPAA2_SEC_DP_DEBUG("FD build failed"); 1870 goto skip_tx; 1871 } 1872 ops++; 1873 } 1874 1875 loop = 0; 1876 retry_count = 0; 1877 while (loop < frames_to_send) { 1878 ret = qbman_swp_enqueue_multiple_desc(swp, 1879 &eqdesc[loop], &fd_arr[loop], 1880 frames_to_send - loop); 1881 if (unlikely(ret < 0)) { 1882 retry_count++; 1883 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1884 num_tx += loop; 1885 nb_ops -= loop; 1886 DPAA2_SEC_DP_DEBUG("Enqueue fail"); 1887 /* freeing the fle buffers */ 1888 while (loop < frames_to_send) { 1889 free_fle(&fd_arr[loop], 1890 dpaa2_qp); 1891 loop++; 1892 } 1893 goto skip_tx; 1894 } 1895 } else { 1896 loop += ret; 1897 retry_count = 0; 1898 } 1899 } 1900 1901 num_tx += loop; 1902 nb_ops -= loop; 1903 } 1904 1905 skip_tx: 1906 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1907 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1908 return num_tx; 1909 } 1910 1911 static uint16_t 1912 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1913 uint16_t nb_ops) 1914 { 1915 /* Function is responsible to receive frames for a given device and VQ*/ 1916 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1917 struct qbman_result *dq_storage; 1918 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1919 int ret, num_rx = 0; 1920 uint8_t is_last = 0, status; 1921 struct qbman_swp *swp; 1922 const struct qbman_fd *fd; 1923 struct qbman_pull_desc pulldesc; 1924 1925 if (!DPAA2_PER_LCORE_DPIO) { 1926 ret = dpaa2_affine_qbman_swp(); 1927 if (ret) { 1928 DPAA2_SEC_ERR( 1929 "Failed to allocate IO portal, tid: %d", 1930 rte_gettid()); 1931 return 0; 1932 } 1933 } 1934 swp = DPAA2_PER_LCORE_PORTAL; 1935 dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0]; 1936 1937 qbman_pull_desc_clear(&pulldesc); 1938 qbman_pull_desc_set_numframes(&pulldesc, 1939 (nb_ops > dpaa2_dqrr_size) ? 1940 dpaa2_dqrr_size : nb_ops); 1941 qbman_pull_desc_set_fq(&pulldesc, fqid); 1942 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1943 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1944 1); 1945 1946 /*Issue a volatile dequeue command. */ 1947 while (1) { 1948 if (qbman_swp_pull(swp, &pulldesc)) { 1949 DPAA2_SEC_WARN( 1950 "SEC VDQ command is not issued : QBMAN busy"); 1951 /* Portal was busy, try again */ 1952 continue; 1953 } 1954 break; 1955 }; 1956 1957 /* Receive the packets till Last Dequeue entry is found with 1958 * respect to the above issues PULL command. 1959 */ 1960 while (!is_last) { 1961 /* Check if the previous issued command is completed. 1962 * Also seems like the SWP is shared between the Ethernet Driver 1963 * and the SEC driver. 1964 */ 1965 while (!qbman_check_command_complete(dq_storage)) 1966 ; 1967 1968 /* Loop until the dq_storage is updated with 1969 * new token by QBMAN 1970 */ 1971 while (!qbman_check_new_result(dq_storage)) 1972 ; 1973 /* Check whether Last Pull command is Expired and 1974 * setting Condition for Loop termination 1975 */ 1976 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1977 is_last = 1; 1978 /* Check for valid frame. */ 1979 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1980 if (unlikely( 1981 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1982 DPAA2_SEC_DP_DEBUG("No frame is delivered"); 1983 continue; 1984 } 1985 } 1986 1987 fd = qbman_result_DQ_fd(dq_storage); 1988 ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp); 1989 1990 if (unlikely(fd->simple.frc)) { 1991 /* TODO Parse SEC errors */ 1992 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) { 1993 DPAA2_SEC_DP_ERR("SEC returned Error - %x", 1994 fd->simple.frc); 1995 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP) 1996 dpaa2_sec_dump(ops[num_rx], stdout); 1997 } 1998 1999 dpaa2_qp->rx_vq.err_pkts += 1; 2000 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 2001 } else { 2002 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 2003 } 2004 2005 num_rx++; 2006 dq_storage++; 2007 } /* End of Packet Rx loop */ 2008 2009 dpaa2_qp->rx_vq.rx_pkts += num_rx; 2010 2011 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx, 2012 dpaa2_qp->rx_vq.err_pkts); 2013 /*Return the total number of packets received to DPAA2 app*/ 2014 return num_rx; 2015 } 2016 2017 /** Release queue pair */ 2018 static int 2019 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 2020 { 2021 struct dpaa2_sec_qp *qp = 2022 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 2023 2024 PMD_INIT_FUNC_TRACE(); 2025 2026 dpaa2_queue_storage_free(&qp->rx_vq, 1); 2027 rte_mempool_free(qp->fle_pool); 2028 rte_free(qp); 2029 2030 dev->data->queue_pairs[queue_pair_id] = NULL; 2031 2032 return 0; 2033 } 2034 2035 /** Setup a queue pair */ 2036 static int 2037 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 2038 const struct rte_cryptodev_qp_conf *qp_conf, 2039 __rte_unused int socket_id) 2040 { 2041 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2042 struct dpaa2_sec_qp *qp; 2043 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2044 struct dpseci_rx_queue_cfg cfg; 2045 int32_t retcode; 2046 char str[RTE_MEMZONE_NAMESIZE]; 2047 2048 PMD_INIT_FUNC_TRACE(); 2049 2050 /* If qp is already in use free ring memory and qp metadata. */ 2051 if (dev->data->queue_pairs[qp_id] != NULL) { 2052 DPAA2_SEC_INFO("QP already setup"); 2053 return 0; 2054 } 2055 2056 if (qp_conf->nb_descriptors < (2 * FLE_POOL_CACHE_SIZE)) { 2057 DPAA2_SEC_ERR("Minimum supported nb_descriptors %d," 2058 " but given %d", (2 * FLE_POOL_CACHE_SIZE), 2059 qp_conf->nb_descriptors); 2060 return -EINVAL; 2061 } 2062 2063 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 2064 dev, qp_id, qp_conf); 2065 2066 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 2067 2068 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 2069 RTE_CACHE_LINE_SIZE); 2070 if (!qp) { 2071 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 2072 return -ENOMEM; 2073 } 2074 2075 qp->rx_vq.crypto_data = dev->data; 2076 qp->tx_vq.crypto_data = dev->data; 2077 retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1); 2078 if (retcode) { 2079 dpaa2_queue_storage_free((&qp->rx_vq), 1); 2080 return retcode; 2081 } 2082 2083 dev->data->queue_pairs[qp_id] = qp; 2084 2085 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d", 2086 getpid(), dev->data->dev_id, qp_id); 2087 qp->fle_pool = rte_mempool_create((const char *)str, 2088 qp_conf->nb_descriptors, 2089 FLE_POOL_BUF_SIZE, 2090 FLE_POOL_CACHE_SIZE, 0, 2091 NULL, NULL, NULL, NULL, 2092 SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET); 2093 if (!qp->fle_pool) { 2094 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 2095 return -ENOMEM; 2096 } 2097 2098 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 2099 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 2100 qp_id, &cfg); 2101 return retcode; 2102 } 2103 2104 /** Returns the size of the aesni gcm session structure */ 2105 static unsigned int 2106 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 2107 { 2108 PMD_INIT_FUNC_TRACE(); 2109 2110 return sizeof(dpaa2_sec_session); 2111 } 2112 2113 static int 2114 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform, 2115 dpaa2_sec_session *session) 2116 { 2117 struct alginfo cipherdata; 2118 int bufsize, ret = 0; 2119 struct ctxt_priv *priv; 2120 struct sec_flow_context *flc; 2121 2122 PMD_INIT_FUNC_TRACE(); 2123 2124 /* For SEC CIPHER only one descriptor is required. */ 2125 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2126 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2127 RTE_CACHE_LINE_SIZE); 2128 if (priv == NULL) { 2129 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2130 return -ENOMEM; 2131 } 2132 2133 flc = &priv->flc_desc[0].flc; 2134 2135 session->ctxt_type = DPAA2_SEC_CIPHER; 2136 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 2137 RTE_CACHE_LINE_SIZE); 2138 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 2139 DPAA2_SEC_ERR("No Memory for cipher key"); 2140 rte_free(priv); 2141 return -ENOMEM; 2142 } 2143 session->cipher_key.length = xform->cipher.key.length; 2144 2145 memcpy(session->cipher_key.data, xform->cipher.key.data, 2146 xform->cipher.key.length); 2147 cipherdata.key = (size_t)session->cipher_key.data; 2148 cipherdata.keylen = session->cipher_key.length; 2149 cipherdata.key_enc_flags = 0; 2150 cipherdata.key_type = RTA_DATA_IMM; 2151 2152 /* Set IV parameters */ 2153 session->iv.offset = xform->cipher.iv.offset; 2154 session->iv.length = xform->cipher.iv.length; 2155 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2156 DIR_ENC : DIR_DEC; 2157 2158 switch (xform->cipher.algo) { 2159 case RTE_CRYPTO_CIPHER_AES_CBC: 2160 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2161 cipherdata.algmode = OP_ALG_AAI_CBC; 2162 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2163 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2164 SHR_NEVER, &cipherdata, 2165 session->iv.length, 2166 session->dir); 2167 break; 2168 case RTE_CRYPTO_CIPHER_3DES_CBC: 2169 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2170 cipherdata.algmode = OP_ALG_AAI_CBC; 2171 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2172 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2173 SHR_NEVER, &cipherdata, 2174 session->iv.length, 2175 session->dir); 2176 break; 2177 case RTE_CRYPTO_CIPHER_DES_CBC: 2178 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2179 cipherdata.algmode = OP_ALG_AAI_CBC; 2180 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2181 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2182 SHR_NEVER, &cipherdata, 2183 session->iv.length, 2184 session->dir); 2185 break; 2186 case RTE_CRYPTO_CIPHER_AES_CTR: 2187 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2188 cipherdata.algmode = OP_ALG_AAI_CTR; 2189 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2190 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 2191 SHR_NEVER, &cipherdata, 2192 session->iv.length, 2193 session->dir); 2194 break; 2195 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2196 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 2197 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 2198 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 2199 &cipherdata, 2200 session->dir); 2201 break; 2202 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2203 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 2204 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 2205 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 2206 &cipherdata, 2207 session->dir); 2208 break; 2209 default: 2210 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 2211 rte_cryptodev_get_cipher_algo_string(xform->cipher.algo), 2212 xform->cipher.algo); 2213 ret = -ENOTSUP; 2214 goto error_out; 2215 } 2216 2217 if (bufsize < 0) { 2218 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 2219 ret = -EINVAL; 2220 goto error_out; 2221 } 2222 2223 flc->word1_sdl = (uint8_t)bufsize; 2224 session->ctxt = priv; 2225 2226 #ifdef CAAM_DESC_DEBUG 2227 int i; 2228 for (i = 0; i < bufsize; i++) 2229 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 2230 #endif 2231 return ret; 2232 2233 error_out: 2234 rte_free(session->cipher_key.data); 2235 rte_free(priv); 2236 return ret; 2237 } 2238 2239 static int 2240 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform, 2241 dpaa2_sec_session *session) 2242 { 2243 struct alginfo authdata; 2244 int bufsize, ret = 0; 2245 struct ctxt_priv *priv; 2246 struct sec_flow_context *flc; 2247 2248 PMD_INIT_FUNC_TRACE(); 2249 2250 memset(&authdata, 0, sizeof(authdata)); 2251 2252 /* For SEC AUTH three descriptors are required for various stages */ 2253 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2254 sizeof(struct ctxt_priv) + 3 * 2255 sizeof(struct sec_flc_desc), 2256 RTE_CACHE_LINE_SIZE); 2257 if (priv == NULL) { 2258 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2259 return -ENOMEM; 2260 } 2261 2262 flc = &priv->flc_desc[DESC_INITFINAL].flc; 2263 2264 session->ctxt_type = DPAA2_SEC_AUTH; 2265 session->auth_key.length = xform->auth.key.length; 2266 if (xform->auth.key.length) { 2267 session->auth_key.data = rte_zmalloc(NULL, 2268 xform->auth.key.length, 2269 RTE_CACHE_LINE_SIZE); 2270 if (session->auth_key.data == NULL) { 2271 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 2272 rte_free(priv); 2273 return -ENOMEM; 2274 } 2275 memcpy(session->auth_key.data, xform->auth.key.data, 2276 xform->auth.key.length); 2277 authdata.key = (size_t)session->auth_key.data; 2278 authdata.key_enc_flags = 0; 2279 authdata.key_type = RTA_DATA_IMM; 2280 } 2281 authdata.keylen = session->auth_key.length; 2282 2283 session->digest_length = xform->auth.digest_length; 2284 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2285 DIR_ENC : DIR_DEC; 2286 2287 switch (xform->auth.algo) { 2288 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2289 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2290 authdata.algmode = OP_ALG_AAI_HMAC; 2291 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2292 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2293 1, 0, SHR_NEVER, &authdata, 2294 !session->dir, 2295 session->digest_length); 2296 break; 2297 case RTE_CRYPTO_AUTH_MD5_HMAC: 2298 authdata.algtype = OP_ALG_ALGSEL_MD5; 2299 authdata.algmode = OP_ALG_AAI_HMAC; 2300 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2301 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2302 1, 0, SHR_NEVER, &authdata, 2303 !session->dir, 2304 session->digest_length); 2305 break; 2306 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2307 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2308 authdata.algmode = OP_ALG_AAI_HMAC; 2309 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2310 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2311 1, 0, SHR_NEVER, &authdata, 2312 !session->dir, 2313 session->digest_length); 2314 break; 2315 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2316 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2317 authdata.algmode = OP_ALG_AAI_HMAC; 2318 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2319 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2320 1, 0, SHR_NEVER, &authdata, 2321 !session->dir, 2322 session->digest_length); 2323 break; 2324 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2325 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2326 authdata.algmode = OP_ALG_AAI_HMAC; 2327 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2328 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2329 1, 0, SHR_NEVER, &authdata, 2330 !session->dir, 2331 session->digest_length); 2332 break; 2333 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2334 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2335 authdata.algmode = OP_ALG_AAI_HMAC; 2336 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2337 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2338 1, 0, SHR_NEVER, &authdata, 2339 !session->dir, 2340 session->digest_length); 2341 break; 2342 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2343 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2344 authdata.algmode = OP_ALG_AAI_F9; 2345 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2346 session->iv.offset = xform->auth.iv.offset; 2347 session->iv.length = xform->auth.iv.length; 2348 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2349 1, 0, &authdata, 2350 !session->dir, 2351 session->digest_length); 2352 break; 2353 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2354 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2355 authdata.algmode = OP_ALG_AAI_F9; 2356 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2357 session->iv.offset = xform->auth.iv.offset; 2358 session->iv.length = xform->auth.iv.length; 2359 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2360 1, 0, &authdata, 2361 !session->dir, 2362 session->digest_length); 2363 break; 2364 case RTE_CRYPTO_AUTH_SHA1: 2365 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2366 authdata.algmode = OP_ALG_AAI_HASH; 2367 session->auth_alg = RTE_CRYPTO_AUTH_SHA1; 2368 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2369 1, 0, SHR_NEVER, &authdata, 2370 !session->dir, 2371 session->digest_length); 2372 break; 2373 case RTE_CRYPTO_AUTH_MD5: 2374 authdata.algtype = OP_ALG_ALGSEL_MD5; 2375 authdata.algmode = OP_ALG_AAI_HASH; 2376 session->auth_alg = RTE_CRYPTO_AUTH_MD5; 2377 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2378 1, 0, SHR_NEVER, &authdata, 2379 !session->dir, 2380 session->digest_length); 2381 break; 2382 case RTE_CRYPTO_AUTH_SHA256: 2383 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2384 authdata.algmode = OP_ALG_AAI_HASH; 2385 session->auth_alg = RTE_CRYPTO_AUTH_SHA256; 2386 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2387 1, 0, SHR_NEVER, &authdata, 2388 !session->dir, 2389 session->digest_length); 2390 break; 2391 case RTE_CRYPTO_AUTH_SHA384: 2392 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2393 authdata.algmode = OP_ALG_AAI_HASH; 2394 session->auth_alg = RTE_CRYPTO_AUTH_SHA384; 2395 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2396 1, 0, SHR_NEVER, &authdata, 2397 !session->dir, 2398 session->digest_length); 2399 break; 2400 case RTE_CRYPTO_AUTH_SHA512: 2401 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2402 authdata.algmode = OP_ALG_AAI_HASH; 2403 session->auth_alg = RTE_CRYPTO_AUTH_SHA512; 2404 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2405 1, 0, SHR_NEVER, &authdata, 2406 !session->dir, 2407 session->digest_length); 2408 break; 2409 case RTE_CRYPTO_AUTH_SHA224: 2410 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2411 authdata.algmode = OP_ALG_AAI_HASH; 2412 session->auth_alg = RTE_CRYPTO_AUTH_SHA224; 2413 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2414 1, 0, SHR_NEVER, &authdata, 2415 !session->dir, 2416 session->digest_length); 2417 break; 2418 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2419 authdata.algtype = OP_ALG_ALGSEL_AES; 2420 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2421 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2422 bufsize = cnstr_shdsc_aes_mac( 2423 priv->flc_desc[DESC_INITFINAL].desc, 2424 1, 0, SHR_NEVER, &authdata, 2425 !session->dir, 2426 session->digest_length); 2427 break; 2428 case RTE_CRYPTO_AUTH_AES_CMAC: 2429 authdata.algtype = OP_ALG_ALGSEL_AES; 2430 authdata.algmode = OP_ALG_AAI_CMAC; 2431 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2432 bufsize = cnstr_shdsc_aes_mac( 2433 priv->flc_desc[DESC_INITFINAL].desc, 2434 1, 0, SHR_NEVER, &authdata, 2435 !session->dir, 2436 session->digest_length); 2437 break; 2438 default: 2439 DPAA2_SEC_ERR("Crypto: Unsupported Auth alg %s (%u)", 2440 rte_cryptodev_get_auth_algo_string(xform->auth.algo), 2441 xform->auth.algo); 2442 ret = -ENOTSUP; 2443 goto error_out; 2444 } 2445 2446 if (bufsize < 0) { 2447 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2448 ret = -EINVAL; 2449 goto error_out; 2450 } 2451 2452 flc->word1_sdl = (uint8_t)bufsize; 2453 session->ctxt = priv; 2454 #ifdef CAAM_DESC_DEBUG 2455 int i; 2456 for (i = 0; i < bufsize; i++) 2457 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2458 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2459 #endif 2460 2461 return ret; 2462 2463 error_out: 2464 rte_free(session->auth_key.data); 2465 rte_free(priv); 2466 return ret; 2467 } 2468 2469 static int 2470 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform, 2471 dpaa2_sec_session *session) 2472 { 2473 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2474 struct alginfo aeaddata; 2475 int bufsize; 2476 struct ctxt_priv *priv; 2477 struct sec_flow_context *flc; 2478 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2479 int err, ret = 0; 2480 2481 PMD_INIT_FUNC_TRACE(); 2482 2483 /* Set IV parameters */ 2484 session->iv.offset = aead_xform->iv.offset; 2485 session->iv.length = aead_xform->iv.length; 2486 session->ctxt_type = DPAA2_SEC_AEAD; 2487 2488 /* For SEC AEAD only one descriptor is required */ 2489 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2490 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2491 RTE_CACHE_LINE_SIZE); 2492 if (priv == NULL) { 2493 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2494 return -ENOMEM; 2495 } 2496 2497 flc = &priv->flc_desc[0].flc; 2498 2499 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2500 RTE_CACHE_LINE_SIZE); 2501 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2502 DPAA2_SEC_ERR("No Memory for aead key"); 2503 rte_free(priv); 2504 return -ENOMEM; 2505 } 2506 memcpy(session->aead_key.data, aead_xform->key.data, 2507 aead_xform->key.length); 2508 2509 session->digest_length = aead_xform->digest_length; 2510 session->aead_key.length = aead_xform->key.length; 2511 ctxt->auth_only_len = aead_xform->aad_length; 2512 2513 aeaddata.key = (size_t)session->aead_key.data; 2514 aeaddata.keylen = session->aead_key.length; 2515 aeaddata.key_enc_flags = 0; 2516 aeaddata.key_type = RTA_DATA_IMM; 2517 2518 switch (aead_xform->algo) { 2519 case RTE_CRYPTO_AEAD_AES_GCM: 2520 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2521 aeaddata.algmode = OP_ALG_AAI_GCM; 2522 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2523 break; 2524 default: 2525 2526 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %s (%u)", 2527 rte_cryptodev_get_aead_algo_string(aead_xform->algo), 2528 aead_xform->algo); 2529 ret = -ENOTSUP; 2530 goto error_out; 2531 } 2532 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2533 DIR_ENC : DIR_DEC; 2534 2535 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2536 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2537 DESC_JOB_IO_LEN, 2538 (unsigned int *)priv->flc_desc[0].desc, 2539 &priv->flc_desc[0].desc[1], 1); 2540 2541 if (err < 0) { 2542 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2543 ret = -EINVAL; 2544 goto error_out; 2545 } 2546 if (priv->flc_desc[0].desc[1] & 1) { 2547 aeaddata.key_type = RTA_DATA_IMM; 2548 } else { 2549 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2550 aeaddata.key_type = RTA_DATA_PTR; 2551 } 2552 priv->flc_desc[0].desc[0] = 0; 2553 priv->flc_desc[0].desc[1] = 0; 2554 2555 if (session->dir == DIR_ENC) 2556 bufsize = cnstr_shdsc_gcm_encap( 2557 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2558 &aeaddata, session->iv.length, 2559 session->digest_length); 2560 else 2561 bufsize = cnstr_shdsc_gcm_decap( 2562 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2563 &aeaddata, session->iv.length, 2564 session->digest_length); 2565 if (bufsize < 0) { 2566 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2567 ret = -EINVAL; 2568 goto error_out; 2569 } 2570 2571 flc->word1_sdl = (uint8_t)bufsize; 2572 session->ctxt = priv; 2573 #ifdef CAAM_DESC_DEBUG 2574 int i; 2575 for (i = 0; i < bufsize; i++) 2576 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2577 i, priv->flc_desc[0].desc[i]); 2578 #endif 2579 return ret; 2580 2581 error_out: 2582 rte_free(session->aead_key.data); 2583 rte_free(priv); 2584 return ret; 2585 } 2586 2587 2588 static int 2589 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform, 2590 dpaa2_sec_session *session) 2591 { 2592 struct alginfo authdata, cipherdata; 2593 int bufsize; 2594 struct ctxt_priv *priv; 2595 struct sec_flow_context *flc; 2596 struct rte_crypto_cipher_xform *cipher_xform; 2597 struct rte_crypto_auth_xform *auth_xform; 2598 int err, ret = 0; 2599 2600 PMD_INIT_FUNC_TRACE(); 2601 2602 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2603 cipher_xform = &xform->cipher; 2604 auth_xform = &xform->next->auth; 2605 session->ctxt_type = 2606 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2607 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2608 } else { 2609 cipher_xform = &xform->next->cipher; 2610 auth_xform = &xform->auth; 2611 session->ctxt_type = 2612 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2613 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2614 } 2615 2616 /* Set IV parameters */ 2617 session->iv.offset = cipher_xform->iv.offset; 2618 session->iv.length = cipher_xform->iv.length; 2619 2620 /* For SEC AEAD only one descriptor is required */ 2621 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2622 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2623 RTE_CACHE_LINE_SIZE); 2624 if (priv == NULL) { 2625 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2626 return -ENOMEM; 2627 } 2628 2629 flc = &priv->flc_desc[0].flc; 2630 2631 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2632 RTE_CACHE_LINE_SIZE); 2633 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2634 DPAA2_SEC_ERR("No Memory for cipher key"); 2635 rte_free(priv); 2636 return -ENOMEM; 2637 } 2638 session->cipher_key.length = cipher_xform->key.length; 2639 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2640 RTE_CACHE_LINE_SIZE); 2641 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2642 DPAA2_SEC_ERR("No Memory for auth key"); 2643 rte_free(session->cipher_key.data); 2644 rte_free(priv); 2645 return -ENOMEM; 2646 } 2647 session->auth_key.length = auth_xform->key.length; 2648 memcpy(session->cipher_key.data, cipher_xform->key.data, 2649 cipher_xform->key.length); 2650 memcpy(session->auth_key.data, auth_xform->key.data, 2651 auth_xform->key.length); 2652 2653 authdata.key = (size_t)session->auth_key.data; 2654 authdata.keylen = session->auth_key.length; 2655 authdata.key_enc_flags = 0; 2656 authdata.key_type = RTA_DATA_IMM; 2657 2658 session->digest_length = auth_xform->digest_length; 2659 2660 switch (auth_xform->algo) { 2661 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2662 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2663 authdata.algmode = OP_ALG_AAI_HMAC; 2664 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2665 break; 2666 case RTE_CRYPTO_AUTH_MD5_HMAC: 2667 authdata.algtype = OP_ALG_ALGSEL_MD5; 2668 authdata.algmode = OP_ALG_AAI_HMAC; 2669 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2670 break; 2671 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2672 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2673 authdata.algmode = OP_ALG_AAI_HMAC; 2674 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2675 break; 2676 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2677 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2678 authdata.algmode = OP_ALG_AAI_HMAC; 2679 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2680 break; 2681 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2682 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2683 authdata.algmode = OP_ALG_AAI_HMAC; 2684 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2685 break; 2686 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2687 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2688 authdata.algmode = OP_ALG_AAI_HMAC; 2689 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2690 break; 2691 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2692 authdata.algtype = OP_ALG_ALGSEL_AES; 2693 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2694 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2695 break; 2696 case RTE_CRYPTO_AUTH_AES_CMAC: 2697 authdata.algtype = OP_ALG_ALGSEL_AES; 2698 authdata.algmode = OP_ALG_AAI_CMAC; 2699 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2700 break; 2701 default: 2702 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %s (%u)", 2703 rte_cryptodev_get_auth_algo_string(auth_xform->algo), 2704 auth_xform->algo); 2705 ret = -ENOTSUP; 2706 goto error_out; 2707 } 2708 cipherdata.key = (size_t)session->cipher_key.data; 2709 cipherdata.keylen = session->cipher_key.length; 2710 cipherdata.key_enc_flags = 0; 2711 cipherdata.key_type = RTA_DATA_IMM; 2712 2713 switch (cipher_xform->algo) { 2714 case RTE_CRYPTO_CIPHER_AES_CBC: 2715 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2716 cipherdata.algmode = OP_ALG_AAI_CBC; 2717 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2718 break; 2719 case RTE_CRYPTO_CIPHER_3DES_CBC: 2720 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2721 cipherdata.algmode = OP_ALG_AAI_CBC; 2722 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2723 break; 2724 case RTE_CRYPTO_CIPHER_DES_CBC: 2725 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2726 cipherdata.algmode = OP_ALG_AAI_CBC; 2727 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2728 break; 2729 case RTE_CRYPTO_CIPHER_AES_CTR: 2730 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2731 cipherdata.algmode = OP_ALG_AAI_CTR; 2732 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2733 break; 2734 default: 2735 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)", 2736 rte_cryptodev_get_cipher_algo_string(cipher_xform->algo), 2737 cipher_xform->algo); 2738 ret = -ENOTSUP; 2739 goto error_out; 2740 } 2741 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2742 DIR_ENC : DIR_DEC; 2743 2744 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2745 priv->flc_desc[0].desc[1] = authdata.keylen; 2746 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2747 DESC_JOB_IO_LEN, 2748 (unsigned int *)priv->flc_desc[0].desc, 2749 &priv->flc_desc[0].desc[2], 2); 2750 2751 if (err < 0) { 2752 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2753 ret = -EINVAL; 2754 goto error_out; 2755 } 2756 if (priv->flc_desc[0].desc[2] & 1) { 2757 cipherdata.key_type = RTA_DATA_IMM; 2758 } else { 2759 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2760 cipherdata.key_type = RTA_DATA_PTR; 2761 } 2762 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2763 authdata.key_type = RTA_DATA_IMM; 2764 } else { 2765 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2766 authdata.key_type = RTA_DATA_PTR; 2767 } 2768 priv->flc_desc[0].desc[0] = 0; 2769 priv->flc_desc[0].desc[1] = 0; 2770 priv->flc_desc[0].desc[2] = 0; 2771 2772 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2773 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2774 0, SHR_SERIAL, 2775 &cipherdata, &authdata, 2776 session->iv.length, 2777 session->digest_length, 2778 session->dir); 2779 if (bufsize < 0) { 2780 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 2781 ret = -EINVAL; 2782 goto error_out; 2783 } 2784 } else { 2785 DPAA2_SEC_ERR("Hash before cipher not supported"); 2786 ret = -ENOTSUP; 2787 goto error_out; 2788 } 2789 2790 flc->word1_sdl = (uint8_t)bufsize; 2791 session->ctxt = priv; 2792 #ifdef CAAM_DESC_DEBUG 2793 int i; 2794 for (i = 0; i < bufsize; i++) 2795 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2796 i, priv->flc_desc[0].desc[i]); 2797 #endif 2798 2799 return ret; 2800 2801 error_out: 2802 rte_free(session->cipher_key.data); 2803 rte_free(session->auth_key.data); 2804 rte_free(priv); 2805 return ret; 2806 } 2807 2808 static int 2809 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess) 2810 { 2811 dpaa2_sec_session *session = sess; 2812 int ret; 2813 2814 PMD_INIT_FUNC_TRACE(); 2815 2816 if (unlikely(sess == NULL)) { 2817 DPAA2_SEC_ERR("Invalid session struct"); 2818 return -EINVAL; 2819 } 2820 2821 memset(session, 0, sizeof(dpaa2_sec_session)); 2822 /* Default IV length = 0 */ 2823 session->iv.length = 0; 2824 2825 /* Cipher Only */ 2826 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2827 ret = dpaa2_sec_cipher_init(xform, session); 2828 2829 /* Authentication Only */ 2830 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2831 xform->next == NULL) { 2832 ret = dpaa2_sec_auth_init(xform, session); 2833 2834 /* Cipher then Authenticate */ 2835 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2836 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2837 session->ext_params.aead_ctxt.auth_cipher_text = true; 2838 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2839 ret = dpaa2_sec_auth_init(xform, session); 2840 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2841 ret = dpaa2_sec_cipher_init(xform, session); 2842 else 2843 ret = dpaa2_sec_aead_chain_init(xform, session); 2844 /* Authenticate then Cipher */ 2845 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2846 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2847 session->ext_params.aead_ctxt.auth_cipher_text = false; 2848 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2849 ret = dpaa2_sec_cipher_init(xform, session); 2850 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2851 ret = dpaa2_sec_auth_init(xform, session); 2852 else 2853 ret = dpaa2_sec_aead_chain_init(xform, session); 2854 /* AEAD operation for AES-GCM kind of Algorithms */ 2855 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2856 xform->next == NULL) { 2857 ret = dpaa2_sec_aead_init(xform, session); 2858 2859 } else { 2860 DPAA2_SEC_ERR("Invalid crypto type"); 2861 return -EINVAL; 2862 } 2863 2864 return ret; 2865 } 2866 2867 static int 2868 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2869 dpaa2_sec_session *session, 2870 struct alginfo *aeaddata) 2871 { 2872 PMD_INIT_FUNC_TRACE(); 2873 2874 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2875 RTE_CACHE_LINE_SIZE); 2876 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2877 DPAA2_SEC_ERR("No Memory for aead key"); 2878 return -ENOMEM; 2879 } 2880 memcpy(session->aead_key.data, aead_xform->key.data, 2881 aead_xform->key.length); 2882 2883 session->digest_length = aead_xform->digest_length; 2884 session->aead_key.length = aead_xform->key.length; 2885 2886 aeaddata->key = (size_t)session->aead_key.data; 2887 aeaddata->keylen = session->aead_key.length; 2888 aeaddata->key_enc_flags = 0; 2889 aeaddata->key_type = RTA_DATA_IMM; 2890 2891 switch (aead_xform->algo) { 2892 case RTE_CRYPTO_AEAD_AES_GCM: 2893 switch (session->digest_length) { 2894 case 8: 2895 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2896 break; 2897 case 12: 2898 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2899 break; 2900 case 16: 2901 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2902 break; 2903 default: 2904 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2905 session->digest_length); 2906 return -EINVAL; 2907 } 2908 aeaddata->algmode = OP_ALG_AAI_GCM; 2909 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2910 break; 2911 case RTE_CRYPTO_AEAD_AES_CCM: 2912 switch (session->digest_length) { 2913 case 8: 2914 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2915 break; 2916 case 12: 2917 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2918 break; 2919 case 16: 2920 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2921 break; 2922 default: 2923 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2924 session->digest_length); 2925 return -EINVAL; 2926 } 2927 aeaddata->algmode = OP_ALG_AAI_CCM; 2928 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2929 break; 2930 default: 2931 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2932 aead_xform->algo); 2933 return -ENOTSUP; 2934 } 2935 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2936 DIR_ENC : DIR_DEC; 2937 2938 return 0; 2939 } 2940 2941 static int 2942 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2943 struct rte_crypto_auth_xform *auth_xform, 2944 dpaa2_sec_session *session, 2945 struct alginfo *cipherdata, 2946 struct alginfo *authdata) 2947 { 2948 if (cipher_xform) { 2949 session->cipher_key.data = rte_zmalloc(NULL, 2950 cipher_xform->key.length, 2951 RTE_CACHE_LINE_SIZE); 2952 if (session->cipher_key.data == NULL && 2953 cipher_xform->key.length > 0) { 2954 DPAA2_SEC_ERR("No Memory for cipher key"); 2955 return -ENOMEM; 2956 } 2957 2958 session->cipher_key.length = cipher_xform->key.length; 2959 memcpy(session->cipher_key.data, cipher_xform->key.data, 2960 cipher_xform->key.length); 2961 session->cipher_alg = cipher_xform->algo; 2962 } else { 2963 session->cipher_key.data = NULL; 2964 session->cipher_key.length = 0; 2965 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2966 } 2967 2968 if (auth_xform) { 2969 session->auth_key.data = rte_zmalloc(NULL, 2970 auth_xform->key.length, 2971 RTE_CACHE_LINE_SIZE); 2972 if (session->auth_key.data == NULL && 2973 auth_xform->key.length > 0) { 2974 DPAA2_SEC_ERR("No Memory for auth key"); 2975 return -ENOMEM; 2976 } 2977 session->auth_key.length = auth_xform->key.length; 2978 memcpy(session->auth_key.data, auth_xform->key.data, 2979 auth_xform->key.length); 2980 session->auth_alg = auth_xform->algo; 2981 session->digest_length = auth_xform->digest_length; 2982 } else { 2983 session->auth_key.data = NULL; 2984 session->auth_key.length = 0; 2985 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2986 } 2987 2988 authdata->key = (size_t)session->auth_key.data; 2989 authdata->keylen = session->auth_key.length; 2990 authdata->key_enc_flags = 0; 2991 authdata->key_type = RTA_DATA_IMM; 2992 switch (session->auth_alg) { 2993 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2994 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2995 authdata->algmode = OP_ALG_AAI_HMAC; 2996 break; 2997 case RTE_CRYPTO_AUTH_MD5_HMAC: 2998 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2999 authdata->algmode = OP_ALG_AAI_HMAC; 3000 break; 3001 case RTE_CRYPTO_AUTH_SHA224_HMAC: 3002 authdata->algmode = OP_ALG_AAI_HMAC; 3003 if (session->digest_length == 6) 3004 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96; 3005 else if (session->digest_length == 14) 3006 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224; 3007 else 3008 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112; 3009 break; 3010 case RTE_CRYPTO_AUTH_SHA256_HMAC: 3011 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 3012 authdata->algmode = OP_ALG_AAI_HMAC; 3013 if (session->digest_length != 16) 3014 DPAA2_SEC_WARN( 3015 "+++Using sha256-hmac truncated len is non-standard," 3016 "it will not work with lookaside proto"); 3017 break; 3018 case RTE_CRYPTO_AUTH_SHA384_HMAC: 3019 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 3020 authdata->algmode = OP_ALG_AAI_HMAC; 3021 break; 3022 case RTE_CRYPTO_AUTH_SHA512_HMAC: 3023 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 3024 authdata->algmode = OP_ALG_AAI_HMAC; 3025 break; 3026 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 3027 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96; 3028 authdata->algmode = OP_ALG_AAI_XCBC_MAC; 3029 break; 3030 case RTE_CRYPTO_AUTH_AES_CMAC: 3031 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 3032 authdata->algmode = OP_ALG_AAI_CMAC; 3033 break; 3034 case RTE_CRYPTO_AUTH_NULL: 3035 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 3036 break; 3037 default: 3038 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %s (%u)", 3039 rte_cryptodev_get_auth_algo_string(session->auth_alg), 3040 session->auth_alg); 3041 return -ENOTSUP; 3042 } 3043 cipherdata->key = (size_t)session->cipher_key.data; 3044 cipherdata->keylen = session->cipher_key.length; 3045 cipherdata->key_enc_flags = 0; 3046 cipherdata->key_type = RTA_DATA_IMM; 3047 3048 switch (session->cipher_alg) { 3049 case RTE_CRYPTO_CIPHER_AES_CBC: 3050 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 3051 cipherdata->algmode = OP_ALG_AAI_CBC; 3052 break; 3053 case RTE_CRYPTO_CIPHER_3DES_CBC: 3054 cipherdata->algtype = OP_PCL_IPSEC_3DES; 3055 cipherdata->algmode = OP_ALG_AAI_CBC; 3056 break; 3057 case RTE_CRYPTO_CIPHER_DES_CBC: 3058 cipherdata->algtype = OP_PCL_IPSEC_DES; 3059 cipherdata->algmode = OP_ALG_AAI_CBC; 3060 break; 3061 case RTE_CRYPTO_CIPHER_AES_CTR: 3062 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 3063 cipherdata->algmode = OP_ALG_AAI_CTR; 3064 break; 3065 case RTE_CRYPTO_CIPHER_NULL: 3066 cipherdata->algtype = OP_PCL_IPSEC_NULL; 3067 break; 3068 default: 3069 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)", 3070 rte_cryptodev_get_cipher_algo_string(session->cipher_alg), 3071 session->cipher_alg); 3072 return -ENOTSUP; 3073 } 3074 3075 return 0; 3076 } 3077 3078 static int 3079 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 3080 struct rte_security_session_conf *conf, 3081 void *sess) 3082 { 3083 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 3084 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3085 struct rte_crypto_auth_xform *auth_xform = NULL; 3086 struct rte_crypto_aead_xform *aead_xform = NULL; 3087 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3088 struct ctxt_priv *priv; 3089 struct alginfo authdata, cipherdata; 3090 int bufsize; 3091 struct sec_flow_context *flc; 3092 uint64_t flc_iova; 3093 int ret = -1; 3094 3095 PMD_INIT_FUNC_TRACE(); 3096 3097 RTE_SET_USED(dev); 3098 3099 /** Make FLC address to align with stashing, low 6 bits are used 3100 * control stashing. 3101 */ 3102 priv = rte_zmalloc(NULL, sizeof(struct ctxt_priv) + 3103 sizeof(struct sec_flc_desc), 3104 DPAA2_STASHING_ALIGN_SIZE); 3105 3106 if (priv == NULL) { 3107 DPAA2_SEC_ERR("No memory for priv CTXT"); 3108 return -ENOMEM; 3109 } 3110 3111 flc = &priv->flc_desc[0].flc; 3112 3113 if (ipsec_xform->life.bytes_hard_limit != 0 || 3114 ipsec_xform->life.bytes_soft_limit != 0 || 3115 ipsec_xform->life.packets_hard_limit != 0 || 3116 ipsec_xform->life.packets_soft_limit != 0) { 3117 rte_free(priv); 3118 return -ENOTSUP; 3119 } 3120 3121 memset(session, 0, sizeof(dpaa2_sec_session)); 3122 3123 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3124 cipher_xform = &conf->crypto_xform->cipher; 3125 if (conf->crypto_xform->next) 3126 auth_xform = &conf->crypto_xform->next->auth; 3127 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3128 session, &cipherdata, &authdata); 3129 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3130 auth_xform = &conf->crypto_xform->auth; 3131 if (conf->crypto_xform->next) 3132 cipher_xform = &conf->crypto_xform->next->cipher; 3133 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 3134 session, &cipherdata, &authdata); 3135 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 3136 aead_xform = &conf->crypto_xform->aead; 3137 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 3138 session, &cipherdata); 3139 authdata.keylen = 0; 3140 authdata.algtype = 0; 3141 } else { 3142 DPAA2_SEC_ERR("XFORM not specified"); 3143 ret = -EINVAL; 3144 goto out; 3145 } 3146 if (ret) { 3147 DPAA2_SEC_ERR("Failed to process xform"); 3148 goto out; 3149 } 3150 3151 session->ctxt_type = DPAA2_SEC_IPSEC; 3152 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 3153 uint8_t hdr[48] = {}; 3154 struct rte_ipv4_hdr *ip4_hdr; 3155 struct rte_ipv6_hdr *ip6_hdr; 3156 struct ipsec_encap_pdb encap_pdb; 3157 3158 flc->dhr = SEC_FLC_DHR_OUTBOUND; 3159 /* For Sec Proto only one descriptor is required. */ 3160 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 3161 3162 /* copy algo specific data to PDB */ 3163 switch (cipherdata.algtype) { 3164 case OP_PCL_IPSEC_AES_CTR: 3165 encap_pdb.ctr.ctr_initial = 0x00000001; 3166 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3167 break; 3168 case OP_PCL_IPSEC_AES_GCM8: 3169 case OP_PCL_IPSEC_AES_GCM12: 3170 case OP_PCL_IPSEC_AES_GCM16: 3171 memcpy(encap_pdb.gcm.salt, 3172 (uint8_t *)&(ipsec_xform->salt), 4); 3173 break; 3174 } 3175 3176 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 3177 PDBOPTS_ESP_OIHI_PDB_INL | 3178 PDBHMO_ESP_SNR; 3179 3180 if (ipsec_xform->options.iv_gen_disable == 0) 3181 encap_pdb.options |= PDBOPTS_ESP_IVSRC; 3182 /* Initializing the sequence number to 1, Security 3183 * engine will choose this sequence number for first packet 3184 * Refer: RFC4303 section: 3.3.3.Sequence Number Generation 3185 */ 3186 encap_pdb.seq_num = 1; 3187 if (ipsec_xform->options.esn) { 3188 encap_pdb.options |= PDBOPTS_ESP_ESN; 3189 encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3190 encap_pdb.seq_num = conf->ipsec.esn.low; 3191 } 3192 if (ipsec_xform->options.copy_dscp) 3193 encap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3194 if (ipsec_xform->options.ecn) 3195 encap_pdb.options |= PDBOPTS_ESP_TECN; 3196 encap_pdb.spi = ipsec_xform->spi; 3197 session->dir = DIR_ENC; 3198 if (ipsec_xform->tunnel.type == 3199 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3200 if (ipsec_xform->options.dec_ttl) 3201 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 3202 if (ipsec_xform->options.copy_df) 3203 encap_pdb.options |= PDBHMO_ESP_DFBIT; 3204 ip4_hdr = (struct rte_ipv4_hdr *)hdr; 3205 3206 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr); 3207 ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF; 3208 ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ? 3209 ipsec_xform->tunnel.ipv4.ttl : 0x40; 3210 ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp<<2); 3211 3212 ip4_hdr->hdr_checksum = 0; 3213 ip4_hdr->packet_id = 0; 3214 if (ipsec_xform->tunnel.ipv4.df) { 3215 uint16_t frag_off = 0; 3216 3217 frag_off |= RTE_IPV4_HDR_DF_FLAG; 3218 ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off); 3219 } else 3220 ip4_hdr->fragment_offset = 0; 3221 3222 memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip, 3223 sizeof(struct in_addr)); 3224 memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip, 3225 sizeof(struct in_addr)); 3226 if (ipsec_xform->options.udp_encap) { 3227 uint16_t sport, dport; 3228 struct rte_udp_hdr *uh = 3229 (struct rte_udp_hdr *) (hdr + 3230 sizeof(struct rte_ipv4_hdr)); 3231 3232 sport = ipsec_xform->udp.sport ? 3233 ipsec_xform->udp.sport : 4500; 3234 dport = ipsec_xform->udp.dport ? 3235 ipsec_xform->udp.dport : 4500; 3236 uh->src_port = rte_cpu_to_be_16(sport); 3237 uh->dst_port = rte_cpu_to_be_16(dport); 3238 uh->dgram_len = 0; 3239 uh->dgram_cksum = 0; 3240 3241 ip4_hdr->next_proto_id = IPPROTO_UDP; 3242 ip4_hdr->total_length = 3243 rte_cpu_to_be_16( 3244 sizeof(struct rte_ipv4_hdr) + 3245 sizeof(struct rte_udp_hdr)); 3246 encap_pdb.ip_hdr_len += 3247 sizeof(struct rte_udp_hdr); 3248 encap_pdb.options |= 3249 PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; 3250 } else { 3251 ip4_hdr->total_length = 3252 rte_cpu_to_be_16( 3253 sizeof(struct rte_ipv4_hdr)); 3254 ip4_hdr->next_proto_id = IPPROTO_ESP; 3255 } 3256 3257 ip4_hdr->hdr_checksum = calc_chksum((uint16_t *) 3258 (void *)ip4_hdr, sizeof(struct rte_ipv4_hdr)); 3259 3260 } else if (ipsec_xform->tunnel.type == 3261 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 3262 ip6_hdr = (struct rte_ipv6_hdr *)hdr; 3263 3264 ip6_hdr->vtc_flow = rte_cpu_to_be_32( 3265 DPAA2_IPv6_DEFAULT_VTC_FLOW | 3266 ((ipsec_xform->tunnel.ipv6.dscp << 3267 RTE_IPV6_HDR_TC_SHIFT) & 3268 RTE_IPV6_HDR_TC_MASK) | 3269 ((ipsec_xform->tunnel.ipv6.flabel << 3270 RTE_IPV6_HDR_FL_SHIFT) & 3271 RTE_IPV6_HDR_FL_MASK)); 3272 /* Payload length will be updated by HW */ 3273 ip6_hdr->payload_len = 0; 3274 ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ? 3275 ipsec_xform->tunnel.ipv6.hlimit : 0x40; 3276 ip6_hdr->proto = (ipsec_xform->proto == 3277 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 3278 IPPROTO_ESP : IPPROTO_AH; 3279 memcpy(&ip6_hdr->src_addr, 3280 &ipsec_xform->tunnel.ipv6.src_addr, 16); 3281 memcpy(&ip6_hdr->dst_addr, 3282 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 3283 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 3284 } 3285 3286 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 3287 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3288 SHR_WAIT : SHR_SERIAL, &encap_pdb, 3289 hdr, &cipherdata, &authdata); 3290 } else if (ipsec_xform->direction == 3291 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 3292 struct ipsec_decap_pdb decap_pdb; 3293 3294 flc->dhr = SEC_FLC_DHR_INBOUND; 3295 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 3296 /* copy algo specific data to PDB */ 3297 switch (cipherdata.algtype) { 3298 case OP_PCL_IPSEC_AES_CTR: 3299 decap_pdb.ctr.ctr_initial = 0x00000001; 3300 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3301 break; 3302 case OP_PCL_IPSEC_AES_GCM8: 3303 case OP_PCL_IPSEC_AES_GCM12: 3304 case OP_PCL_IPSEC_AES_GCM16: 3305 memcpy(decap_pdb.gcm.salt, 3306 (uint8_t *)&(ipsec_xform->salt), 4); 3307 break; 3308 } 3309 3310 if (ipsec_xform->tunnel.type == 3311 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 3312 decap_pdb.options = sizeof(struct ip) << 16; 3313 if (ipsec_xform->options.copy_df) 3314 decap_pdb.options |= PDBHMO_ESP_DFV; 3315 if (ipsec_xform->options.dec_ttl) 3316 decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL; 3317 } else { 3318 decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; 3319 } 3320 if (ipsec_xform->options.esn) { 3321 decap_pdb.options |= PDBOPTS_ESP_ESN; 3322 decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; 3323 decap_pdb.seq_num = conf->ipsec.esn.low; 3324 } 3325 if (ipsec_xform->options.copy_dscp) 3326 decap_pdb.options |= PDBOPTS_ESP_DIFFSERV; 3327 if (ipsec_xform->options.ecn) 3328 decap_pdb.options |= PDBOPTS_ESP_TECN; 3329 3330 if (ipsec_xform->replay_win_sz) { 3331 uint32_t win_sz; 3332 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3333 3334 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3335 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3336 win_sz = 128; 3337 } 3338 switch (win_sz) { 3339 case 1: 3340 case 2: 3341 case 4: 3342 case 8: 3343 case 16: 3344 case 32: 3345 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3346 break; 3347 case 64: 3348 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3349 break; 3350 case 256: 3351 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3352 break; 3353 case 512: 3354 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3355 break; 3356 case 1024: 3357 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3358 break; 3359 case 128: 3360 default: 3361 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3362 } 3363 } 3364 session->dir = DIR_DEC; 3365 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3366 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3367 SHR_WAIT : SHR_SERIAL, 3368 &decap_pdb, &cipherdata, &authdata); 3369 } else { 3370 ret = -EINVAL; 3371 goto out; 3372 } 3373 3374 if (bufsize < 0) { 3375 ret = -EINVAL; 3376 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3377 goto out; 3378 } 3379 3380 flc->word1_sdl = (uint8_t)bufsize; 3381 3382 flc_iova = DPAA2_VADDR_TO_IOVA(flc); 3383 /* Enable the stashing control bit and data stashing only.*/ 3384 DPAA2_SET_FLC_RSC(flc); 3385 dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1, 3386 &flc_iova); 3387 flc->word2_rflc_31_0 = lower_32_bits(flc_iova); 3388 flc->word3_rflc_63_32 = upper_32_bits(flc_iova); 3389 3390 /* Set EWS bit i.e. enable write-safe */ 3391 DPAA2_SET_FLC_EWS(flc); 3392 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3393 DPAA2_SET_FLC_REUSE_BS(flc); 3394 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3395 DPAA2_SET_FLC_REUSE_FF(flc); 3396 3397 session->ctxt = priv; 3398 3399 return 0; 3400 out: 3401 rte_free(session->auth_key.data); 3402 rte_free(session->cipher_key.data); 3403 rte_free(priv); 3404 return ret; 3405 } 3406 3407 static int 3408 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3409 struct rte_security_session_conf *conf, 3410 void *sess) 3411 { 3412 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3413 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3414 struct rte_crypto_auth_xform *auth_xform = NULL; 3415 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3416 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3417 struct ctxt_priv *priv; 3418 struct alginfo authdata, cipherdata; 3419 struct alginfo *p_authdata = NULL; 3420 int bufsize = -1; 3421 struct sec_flow_context *flc; 3422 uint64_t flc_iova; 3423 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3424 int swap = true; 3425 #else 3426 int swap = false; 3427 #endif 3428 3429 PMD_INIT_FUNC_TRACE(); 3430 3431 RTE_SET_USED(dev); 3432 3433 memset(session, 0, sizeof(dpaa2_sec_session)); 3434 3435 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3436 sizeof(struct ctxt_priv) + 3437 sizeof(struct sec_flc_desc), 3438 RTE_CACHE_LINE_SIZE); 3439 3440 if (priv == NULL) { 3441 DPAA2_SEC_ERR("No memory for priv CTXT"); 3442 return -ENOMEM; 3443 } 3444 3445 flc = &priv->flc_desc[0].flc; 3446 3447 /* find xfrm types */ 3448 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3449 cipher_xform = &xform->cipher; 3450 if (xform->next != NULL && 3451 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3452 session->ext_params.aead_ctxt.auth_cipher_text = true; 3453 auth_xform = &xform->next->auth; 3454 } 3455 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3456 auth_xform = &xform->auth; 3457 if (xform->next != NULL && 3458 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3459 session->ext_params.aead_ctxt.auth_cipher_text = false; 3460 cipher_xform = &xform->next->cipher; 3461 } 3462 } else { 3463 DPAA2_SEC_ERR("Invalid crypto type"); 3464 rte_free(priv); 3465 return -EINVAL; 3466 } 3467 3468 session->ctxt_type = DPAA2_SEC_PDCP; 3469 if (cipher_xform) { 3470 session->cipher_key.data = rte_zmalloc(NULL, 3471 cipher_xform->key.length, 3472 RTE_CACHE_LINE_SIZE); 3473 if (session->cipher_key.data == NULL && 3474 cipher_xform->key.length > 0) { 3475 DPAA2_SEC_ERR("No Memory for cipher key"); 3476 rte_free(priv); 3477 return -ENOMEM; 3478 } 3479 session->cipher_key.length = cipher_xform->key.length; 3480 memcpy(session->cipher_key.data, cipher_xform->key.data, 3481 cipher_xform->key.length); 3482 session->dir = 3483 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3484 DIR_ENC : DIR_DEC; 3485 session->cipher_alg = cipher_xform->algo; 3486 } else { 3487 session->cipher_key.data = NULL; 3488 session->cipher_key.length = 0; 3489 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3490 session->dir = DIR_ENC; 3491 } 3492 3493 session->pdcp.domain = pdcp_xform->domain; 3494 session->pdcp.bearer = pdcp_xform->bearer; 3495 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3496 session->pdcp.sn_size = pdcp_xform->sn_size; 3497 session->pdcp.hfn = pdcp_xform->hfn; 3498 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3499 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3500 /* hfv ovd offset location is stored in iv.offset value*/ 3501 if (cipher_xform) 3502 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3503 3504 cipherdata.key = (size_t)session->cipher_key.data; 3505 cipherdata.keylen = session->cipher_key.length; 3506 cipherdata.key_enc_flags = 0; 3507 cipherdata.key_type = RTA_DATA_IMM; 3508 3509 switch (session->cipher_alg) { 3510 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3511 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3512 break; 3513 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3514 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3515 break; 3516 case RTE_CRYPTO_CIPHER_AES_CTR: 3517 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3518 break; 3519 case RTE_CRYPTO_CIPHER_NULL: 3520 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3521 break; 3522 default: 3523 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3524 session->cipher_alg); 3525 goto out; 3526 } 3527 3528 if (auth_xform) { 3529 session->auth_key.data = rte_zmalloc(NULL, 3530 auth_xform->key.length, 3531 RTE_CACHE_LINE_SIZE); 3532 if (!session->auth_key.data && 3533 auth_xform->key.length > 0) { 3534 DPAA2_SEC_ERR("No Memory for auth key"); 3535 rte_free(session->cipher_key.data); 3536 rte_free(priv); 3537 return -ENOMEM; 3538 } 3539 session->auth_key.length = auth_xform->key.length; 3540 memcpy(session->auth_key.data, auth_xform->key.data, 3541 auth_xform->key.length); 3542 session->auth_alg = auth_xform->algo; 3543 } else { 3544 session->auth_key.data = NULL; 3545 session->auth_key.length = 0; 3546 session->auth_alg = 0; 3547 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3548 } 3549 authdata.key = (size_t)session->auth_key.data; 3550 authdata.keylen = session->auth_key.length; 3551 authdata.key_enc_flags = 0; 3552 authdata.key_type = RTA_DATA_IMM; 3553 3554 if (session->auth_alg) { 3555 switch (session->auth_alg) { 3556 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3557 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3558 break; 3559 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3560 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3561 break; 3562 case RTE_CRYPTO_AUTH_AES_CMAC: 3563 authdata.algtype = PDCP_AUTH_TYPE_AES; 3564 break; 3565 case RTE_CRYPTO_AUTH_NULL: 3566 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3567 break; 3568 default: 3569 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3570 session->auth_alg); 3571 goto out; 3572 } 3573 p_authdata = &authdata; 3574 } else { 3575 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3576 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); 3577 goto out; 3578 } 3579 session->auth_key.data = NULL; 3580 session->auth_key.length = 0; 3581 session->auth_alg = 0; 3582 } 3583 authdata.key = (size_t)session->auth_key.data; 3584 authdata.keylen = session->auth_key.length; 3585 authdata.key_enc_flags = 0; 3586 authdata.key_type = RTA_DATA_IMM; 3587 3588 if (pdcp_xform->sdap_enabled) { 3589 int nb_keys_to_inline = 3590 rta_inline_pdcp_sdap_query(authdata.algtype, 3591 cipherdata.algtype, 3592 session->pdcp.sn_size, 3593 session->pdcp.hfn_ovd); 3594 if (nb_keys_to_inline >= 1) { 3595 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3596 cipherdata.key_type = RTA_DATA_PTR; 3597 } 3598 if (nb_keys_to_inline >= 2) { 3599 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 3600 authdata.key_type = RTA_DATA_PTR; 3601 } 3602 } else { 3603 if (rta_inline_pdcp_query(authdata.algtype, 3604 cipherdata.algtype, 3605 session->pdcp.sn_size, 3606 session->pdcp.hfn_ovd)) { 3607 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3608 cipherdata.key_type = RTA_DATA_PTR; 3609 } 3610 } 3611 3612 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3613 if (session->dir == DIR_ENC) 3614 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3615 priv->flc_desc[0].desc, 1, swap, 3616 pdcp_xform->hfn, 3617 session->pdcp.sn_size, 3618 pdcp_xform->bearer, 3619 pdcp_xform->pkt_dir, 3620 pdcp_xform->hfn_threshold, 3621 &cipherdata, &authdata); 3622 else if (session->dir == DIR_DEC) 3623 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3624 priv->flc_desc[0].desc, 1, swap, 3625 pdcp_xform->hfn, 3626 session->pdcp.sn_size, 3627 pdcp_xform->bearer, 3628 pdcp_xform->pkt_dir, 3629 pdcp_xform->hfn_threshold, 3630 &cipherdata, &authdata); 3631 3632 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 3633 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc, 3634 1, swap, &authdata); 3635 } else { 3636 if (session->dir == DIR_ENC) { 3637 if (pdcp_xform->sdap_enabled) 3638 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap( 3639 priv->flc_desc[0].desc, 1, swap, 3640 session->pdcp.sn_size, 3641 pdcp_xform->hfn, 3642 pdcp_xform->bearer, 3643 pdcp_xform->pkt_dir, 3644 pdcp_xform->hfn_threshold, 3645 &cipherdata, p_authdata); 3646 else 3647 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3648 priv->flc_desc[0].desc, 1, swap, 3649 session->pdcp.sn_size, 3650 pdcp_xform->hfn, 3651 pdcp_xform->bearer, 3652 pdcp_xform->pkt_dir, 3653 pdcp_xform->hfn_threshold, 3654 &cipherdata, p_authdata); 3655 } else if (session->dir == DIR_DEC) { 3656 if (pdcp_xform->sdap_enabled) 3657 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap( 3658 priv->flc_desc[0].desc, 1, swap, 3659 session->pdcp.sn_size, 3660 pdcp_xform->hfn, 3661 pdcp_xform->bearer, 3662 pdcp_xform->pkt_dir, 3663 pdcp_xform->hfn_threshold, 3664 &cipherdata, p_authdata); 3665 else 3666 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3667 priv->flc_desc[0].desc, 1, swap, 3668 session->pdcp.sn_size, 3669 pdcp_xform->hfn, 3670 pdcp_xform->bearer, 3671 pdcp_xform->pkt_dir, 3672 pdcp_xform->hfn_threshold, 3673 &cipherdata, p_authdata); 3674 } 3675 } 3676 3677 if (bufsize < 0) { 3678 DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length"); 3679 goto out; 3680 } 3681 3682 flc_iova = DPAA2_VADDR_TO_IOVA(flc); 3683 /* Enable the stashing control bit and data stashing only.*/ 3684 DPAA2_SET_FLC_RSC(flc); 3685 dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1, 3686 &flc_iova); 3687 flc->word2_rflc_31_0 = lower_32_bits(flc_iova); 3688 flc->word3_rflc_63_32 = upper_32_bits(flc_iova); 3689 3690 flc->word1_sdl = (uint8_t)bufsize; 3691 3692 /* TODO - check the perf impact or 3693 * align as per descriptor type 3694 * Set EWS bit i.e. enable write-safe 3695 * DPAA2_SET_FLC_EWS(flc); 3696 */ 3697 3698 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3699 DPAA2_SET_FLC_REUSE_BS(flc); 3700 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3701 DPAA2_SET_FLC_REUSE_FF(flc); 3702 3703 session->ctxt = priv; 3704 3705 return 0; 3706 out: 3707 rte_free(session->auth_key.data); 3708 rte_free(session->cipher_key.data); 3709 rte_free(priv); 3710 return -EINVAL; 3711 } 3712 3713 static int 3714 dpaa2_sec_security_session_create(void *dev, 3715 struct rte_security_session_conf *conf, 3716 struct rte_security_session *sess) 3717 { 3718 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3719 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3720 int ret; 3721 3722 switch (conf->protocol) { 3723 case RTE_SECURITY_PROTOCOL_IPSEC: 3724 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3725 sess_private_data); 3726 break; 3727 case RTE_SECURITY_PROTOCOL_MACSEC: 3728 return -ENOTSUP; 3729 case RTE_SECURITY_PROTOCOL_PDCP: 3730 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3731 sess_private_data); 3732 break; 3733 default: 3734 return -EINVAL; 3735 } 3736 if (ret != 0) { 3737 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3738 return ret; 3739 } 3740 3741 return ret; 3742 } 3743 3744 /** Clear the memory of session so it doesn't leave key material behind */ 3745 static int 3746 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3747 struct rte_security_session *sess) 3748 { 3749 PMD_INIT_FUNC_TRACE(); 3750 void *sess_priv = SECURITY_GET_SESS_PRIV(sess); 3751 3752 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3753 3754 if (sess_priv) { 3755 rte_free(s->ctxt); 3756 rte_free(s->cipher_key.data); 3757 rte_free(s->auth_key.data); 3758 memset(s, 0, sizeof(dpaa2_sec_session)); 3759 } 3760 return 0; 3761 } 3762 3763 static int 3764 dpaa2_sec_security_session_update(void *dev, 3765 struct rte_security_session *sess, 3766 struct rte_security_session_conf *conf) 3767 { 3768 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3769 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess); 3770 int ret; 3771 3772 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC && 3773 conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 3774 return -ENOTSUP; 3775 3776 dpaa2_sec_security_session_destroy(dev, sess); 3777 3778 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3779 sess_private_data); 3780 if (ret != 0) { 3781 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3782 return ret; 3783 } 3784 3785 return ret; 3786 } 3787 3788 static unsigned int 3789 dpaa2_sec_security_session_get_size(void *device __rte_unused) 3790 { 3791 return sizeof(dpaa2_sec_session); 3792 } 3793 3794 static int 3795 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused, 3796 struct rte_crypto_sym_xform *xform, 3797 struct rte_cryptodev_sym_session *sess) 3798 { 3799 void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3800 int ret; 3801 3802 ret = dpaa2_sec_set_session_parameters(xform, sess_private_data); 3803 if (ret != 0) { 3804 DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret); 3805 /* Return session to mempool */ 3806 return ret; 3807 } 3808 3809 return 0; 3810 } 3811 3812 /** Clear the memory of session so it doesn't leave key material behind */ 3813 static void 3814 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused, 3815 struct rte_cryptodev_sym_session *sess) 3816 { 3817 PMD_INIT_FUNC_TRACE(); 3818 dpaa2_sec_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess); 3819 3820 if (s) { 3821 rte_free(s->ctxt); 3822 rte_free(s->cipher_key.data); 3823 rte_free(s->auth_key.data); 3824 } 3825 } 3826 3827 static int 3828 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3829 struct rte_cryptodev_config *config __rte_unused) 3830 { 3831 PMD_INIT_FUNC_TRACE(); 3832 3833 return 0; 3834 } 3835 3836 static int 3837 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3838 { 3839 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3840 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3841 struct dpseci_attr attr; 3842 struct dpaa2_queue *dpaa2_q; 3843 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3844 dev->data->queue_pairs; 3845 struct dpseci_rx_queue_attr rx_attr; 3846 struct dpseci_tx_queue_attr tx_attr; 3847 int ret, i; 3848 3849 PMD_INIT_FUNC_TRACE(); 3850 3851 /* Change the tx burst function if ordered queues are used */ 3852 if (priv->en_ordered) 3853 dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered; 3854 3855 memset(&attr, 0, sizeof(struct dpseci_attr)); 3856 3857 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3858 if (ret) { 3859 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3860 priv->hw_id); 3861 goto get_attr_failure; 3862 } 3863 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3864 if (ret) { 3865 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3866 goto get_attr_failure; 3867 } 3868 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3869 dpaa2_q = &qp[i]->rx_vq; 3870 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3871 &rx_attr); 3872 dpaa2_q->fqid = rx_attr.fqid; 3873 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3874 } 3875 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3876 dpaa2_q = &qp[i]->tx_vq; 3877 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3878 &tx_attr); 3879 dpaa2_q->fqid = tx_attr.fqid; 3880 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3881 } 3882 3883 return 0; 3884 get_attr_failure: 3885 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3886 return -1; 3887 } 3888 3889 static void 3890 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3891 { 3892 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3893 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3894 int ret; 3895 3896 PMD_INIT_FUNC_TRACE(); 3897 3898 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3899 if (ret) { 3900 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3901 priv->hw_id); 3902 return; 3903 } 3904 3905 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3906 if (ret < 0) { 3907 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3908 return; 3909 } 3910 } 3911 3912 static int 3913 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused) 3914 { 3915 PMD_INIT_FUNC_TRACE(); 3916 3917 return 0; 3918 } 3919 3920 static void 3921 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3922 struct rte_cryptodev_info *info) 3923 { 3924 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3925 3926 PMD_INIT_FUNC_TRACE(); 3927 if (info != NULL) { 3928 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3929 info->feature_flags = dev->feature_flags; 3930 info->capabilities = dpaa2_sec_capabilities; 3931 /* No limit of number of sessions */ 3932 info->sym.max_nb_sessions = 0; 3933 info->driver_id = cryptodev_driver_id; 3934 } 3935 } 3936 3937 static 3938 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3939 struct rte_cryptodev_stats *stats) 3940 { 3941 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3942 struct fsl_mc_io dpseci; 3943 struct dpseci_sec_counters counters = {0}; 3944 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3945 dev->data->queue_pairs; 3946 int ret, i; 3947 3948 PMD_INIT_FUNC_TRACE(); 3949 if (stats == NULL) { 3950 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3951 return; 3952 } 3953 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3954 if (qp == NULL || qp[i] == NULL) { 3955 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3956 continue; 3957 } 3958 3959 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3960 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3961 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3962 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3963 } 3964 3965 /* In case as secondary process access stats, MCP portal in priv-hw 3966 * may have primary process address. Need the secondary process 3967 * based MCP portal address for this object. 3968 */ 3969 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3970 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3971 &counters); 3972 if (ret) { 3973 DPAA2_SEC_ERR("SEC counters failed"); 3974 } else { 3975 DPAA2_SEC_INFO("dpseci hardware stats:"); 3976 DPAA2_SEC_INFO("\tNum of Requests Dequeued = %" PRIu64, 3977 counters.dequeued_requests); 3978 DPAA2_SEC_INFO("\tNum of Outbound Encrypt Requests = %" PRIu64, 3979 counters.ob_enc_requests); 3980 DPAA2_SEC_INFO("\tNum of Inbound Decrypt Requests = %" PRIu64, 3981 counters.ib_dec_requests); 3982 DPAA2_SEC_INFO("\tNum of Outbound Bytes Encrypted = %" PRIu64, 3983 counters.ob_enc_bytes); 3984 DPAA2_SEC_INFO("\tNum of Outbound Bytes Protected = %" PRIu64, 3985 counters.ob_prot_bytes); 3986 DPAA2_SEC_INFO("\tNum of Inbound Bytes Decrypted = %" PRIu64, 3987 counters.ib_dec_bytes); 3988 DPAA2_SEC_INFO("\tNum of Inbound Bytes Validated = %" PRIu64, 3989 counters.ib_valid_bytes); 3990 } 3991 } 3992 3993 static 3994 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3995 { 3996 int i; 3997 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3998 (dev->data->queue_pairs); 3999 4000 PMD_INIT_FUNC_TRACE(); 4001 4002 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 4003 if (qp[i] == NULL) { 4004 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 4005 continue; 4006 } 4007 qp[i]->tx_vq.rx_pkts = 0; 4008 qp[i]->tx_vq.tx_pkts = 0; 4009 qp[i]->tx_vq.err_pkts = 0; 4010 qp[i]->rx_vq.rx_pkts = 0; 4011 qp[i]->rx_vq.tx_pkts = 0; 4012 qp[i]->rx_vq.err_pkts = 0; 4013 } 4014 } 4015 4016 static void __rte_hot 4017 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 4018 const struct qbman_fd *fd, 4019 const struct qbman_result *dq, 4020 struct dpaa2_queue *rxq, 4021 struct rte_event *ev) 4022 { 4023 struct dpaa2_sec_qp *qp; 4024 4025 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4026 ev->flow_id = rxq->ev.flow_id; 4027 ev->sub_event_type = rxq->ev.sub_event_type; 4028 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4029 ev->op = RTE_EVENT_OP_NEW; 4030 ev->sched_type = rxq->ev.sched_type; 4031 ev->queue_id = rxq->ev.queue_id; 4032 ev->priority = rxq->ev.priority; 4033 ev->event_ptr = sec_fd_to_mbuf(fd, qp); 4034 4035 qbman_swp_dqrr_consume(swp, dq); 4036 } 4037 4038 static void 4039 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 4040 const struct qbman_fd *fd, 4041 const struct qbman_result *dq, 4042 struct dpaa2_queue *rxq, 4043 struct rte_event *ev) 4044 { 4045 uint8_t dqrr_index; 4046 struct dpaa2_sec_qp *qp; 4047 struct rte_crypto_op *crypto_op; 4048 4049 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4050 ev->flow_id = rxq->ev.flow_id; 4051 ev->sub_event_type = rxq->ev.sub_event_type; 4052 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4053 ev->op = RTE_EVENT_OP_NEW; 4054 ev->sched_type = rxq->ev.sched_type; 4055 ev->queue_id = rxq->ev.queue_id; 4056 ev->priority = rxq->ev.priority; 4057 4058 crypto_op = sec_fd_to_mbuf(fd, qp); 4059 dqrr_index = qbman_get_dqrr_idx(dq); 4060 *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 4061 DPAA2_PER_LCORE_DQRR_SIZE++; 4062 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 4063 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 4064 ev->event_ptr = crypto_op; 4065 } 4066 4067 static void __rte_hot 4068 dpaa2_sec_process_ordered_event(struct qbman_swp *swp, 4069 const struct qbman_fd *fd, 4070 const struct qbman_result *dq, 4071 struct dpaa2_queue *rxq, 4072 struct rte_event *ev) 4073 { 4074 struct rte_crypto_op *crypto_op; 4075 struct dpaa2_sec_qp *qp; 4076 4077 qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq); 4078 ev->flow_id = rxq->ev.flow_id; 4079 ev->sub_event_type = rxq->ev.sub_event_type; 4080 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 4081 ev->op = RTE_EVENT_OP_NEW; 4082 ev->sched_type = rxq->ev.sched_type; 4083 ev->queue_id = rxq->ev.queue_id; 4084 ev->priority = rxq->ev.priority; 4085 crypto_op = sec_fd_to_mbuf(fd, qp); 4086 4087 *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP; 4088 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) << 4089 DPAA2_EQCR_OPRID_SHIFT; 4090 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) << 4091 DPAA2_EQCR_SEQNUM_SHIFT; 4092 4093 qbman_swp_dqrr_consume(swp, dq); 4094 ev->event_ptr = crypto_op; 4095 } 4096 4097 int 4098 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 4099 int qp_id, 4100 struct dpaa2_dpcon_dev *dpcon, 4101 const struct rte_event *event) 4102 { 4103 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4104 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4105 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 4106 struct dpseci_rx_queue_cfg cfg; 4107 uint8_t priority; 4108 int ret; 4109 4110 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 4111 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 4112 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 4113 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 4114 else if (event->sched_type == RTE_SCHED_TYPE_ORDERED) 4115 qp->rx_vq.cb = dpaa2_sec_process_ordered_event; 4116 else 4117 return -EINVAL; 4118 4119 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 4120 (dpcon->num_priorities - 1); 4121 4122 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4123 cfg.options = DPSECI_QUEUE_OPT_DEST; 4124 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 4125 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 4126 cfg.dest_cfg.priority = priority; 4127 4128 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 4129 cfg.user_ctx = (size_t)(&qp->rx_vq); 4130 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 4131 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 4132 cfg.order_preservation_en = 1; 4133 } 4134 4135 if (event->sched_type == RTE_SCHED_TYPE_ORDERED) { 4136 struct opr_cfg ocfg; 4137 4138 /* Restoration window size = 256 frames */ 4139 ocfg.oprrws = 3; 4140 /* Restoration window size = 512 frames for LX2 */ 4141 if (dpaa2_svr_family == SVR_LX2160A) 4142 ocfg.oprrws = 4; 4143 /* Auto advance NESN window enabled */ 4144 ocfg.oa = 1; 4145 /* Late arrival window size disabled */ 4146 ocfg.olws = 0; 4147 /* ORL resource exhaustaion advance NESN disabled */ 4148 ocfg.oeane = 0; 4149 4150 if (priv->en_loose_ordered) 4151 ocfg.oloe = 1; 4152 else 4153 ocfg.oloe = 0; 4154 4155 ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token, 4156 qp_id, OPR_OPT_CREATE, &ocfg); 4157 if (ret) { 4158 DPAA2_SEC_ERR("Error setting opr: ret: %d", ret); 4159 return ret; 4160 } 4161 qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf; 4162 priv->en_ordered = 1; 4163 } 4164 4165 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4166 qp_id, &cfg); 4167 if (ret) { 4168 DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret); 4169 return ret; 4170 } 4171 4172 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 4173 4174 return 0; 4175 } 4176 4177 int 4178 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 4179 int qp_id) 4180 { 4181 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4182 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4183 struct dpseci_rx_queue_cfg cfg; 4184 int ret; 4185 4186 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 4187 cfg.options = DPSECI_QUEUE_OPT_DEST; 4188 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 4189 4190 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 4191 qp_id, &cfg); 4192 if (ret) 4193 DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret); 4194 4195 return ret; 4196 } 4197 4198 static struct rte_cryptodev_ops crypto_ops = { 4199 .dev_configure = dpaa2_sec_dev_configure, 4200 .dev_start = dpaa2_sec_dev_start, 4201 .dev_stop = dpaa2_sec_dev_stop, 4202 .dev_close = dpaa2_sec_dev_close, 4203 .dev_infos_get = dpaa2_sec_dev_infos_get, 4204 .stats_get = dpaa2_sec_stats_get, 4205 .stats_reset = dpaa2_sec_stats_reset, 4206 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 4207 .queue_pair_release = dpaa2_sec_queue_pair_release, 4208 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 4209 .sym_session_configure = dpaa2_sec_sym_session_configure, 4210 .sym_session_clear = dpaa2_sec_sym_session_clear, 4211 /* Raw data-path API related operations */ 4212 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size, 4213 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx, 4214 }; 4215 4216 static const struct rte_security_capability * 4217 dpaa2_sec_capabilities_get(void *device __rte_unused) 4218 { 4219 return dpaa2_sec_security_cap; 4220 } 4221 4222 static const struct rte_security_ops dpaa2_sec_security_ops = { 4223 .session_create = dpaa2_sec_security_session_create, 4224 .session_update = dpaa2_sec_security_session_update, 4225 .session_get_size = dpaa2_sec_security_session_get_size, 4226 .session_stats_get = NULL, 4227 .session_destroy = dpaa2_sec_security_session_destroy, 4228 .set_pkt_metadata = NULL, 4229 .capabilities_get = dpaa2_sec_capabilities_get 4230 }; 4231 4232 static int 4233 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 4234 { 4235 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4236 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 4237 int ret; 4238 4239 PMD_INIT_FUNC_TRACE(); 4240 4241 /* Function is reverse of dpaa2_sec_dev_init. 4242 * It does the following: 4243 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 4244 * 2. Close the DPSECI device 4245 * 3. Free the allocated resources. 4246 */ 4247 4248 /*Close the device at underlying layer*/ 4249 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 4250 if (ret) { 4251 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 4252 return -1; 4253 } 4254 4255 /*Free the allocated memory for ethernet private data and dpseci*/ 4256 priv->hw = NULL; 4257 rte_free(dpseci); 4258 rte_free(dev->security_ctx); 4259 4260 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 4261 dev->data->name, rte_socket_id()); 4262 4263 return 0; 4264 } 4265 4266 static int 4267 check_devargs_handler(const char *key, const char *value, 4268 void *opaque) 4269 { 4270 struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque; 4271 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 4272 4273 if (!strcmp(key, "drv_strict_order")) { 4274 priv->en_loose_ordered = false; 4275 } else if (!strcmp(key, "drv_dump_mode")) { 4276 dpaa2_sec_dp_dump = atoi(value); 4277 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) { 4278 DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not " 4279 "supported, changing to FULL error" 4280 " prints"); 4281 dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP; 4282 } 4283 } else 4284 return -1; 4285 4286 return 0; 4287 } 4288 4289 static void 4290 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key) 4291 { 4292 struct rte_kvargs *kvlist; 4293 struct rte_devargs *devargs; 4294 4295 devargs = cryptodev->device->devargs; 4296 if (!devargs) 4297 return; 4298 4299 kvlist = rte_kvargs_parse(devargs->args, NULL); 4300 if (!kvlist) 4301 return; 4302 4303 if (!rte_kvargs_count(kvlist, key)) { 4304 rte_kvargs_free(kvlist); 4305 return; 4306 } 4307 4308 rte_kvargs_process(kvlist, key, 4309 check_devargs_handler, (void *)cryptodev); 4310 rte_kvargs_free(kvlist); 4311 } 4312 4313 static int 4314 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 4315 { 4316 struct dpaa2_sec_dev_private *internals; 4317 struct rte_device *dev = cryptodev->device; 4318 struct rte_dpaa2_device *dpaa2_dev; 4319 struct rte_security_ctx *security_instance; 4320 struct fsl_mc_io *dpseci; 4321 uint16_t token; 4322 struct dpseci_attr attr; 4323 int retcode, hw_id; 4324 4325 PMD_INIT_FUNC_TRACE(); 4326 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 4327 hw_id = dpaa2_dev->object_id; 4328 4329 cryptodev->driver_id = cryptodev_driver_id; 4330 cryptodev->dev_ops = &crypto_ops; 4331 4332 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 4333 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 4334 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 4335 RTE_CRYPTODEV_FF_HW_ACCELERATED | 4336 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 4337 RTE_CRYPTODEV_FF_SECURITY | 4338 RTE_CRYPTODEV_FF_SYM_RAW_DP | 4339 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 4340 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 4341 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 4342 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 4343 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 4344 4345 internals = cryptodev->data->dev_private; 4346 4347 /* 4348 * For secondary processes, we don't initialise any further as primary 4349 * has already done this work. Only check we don't need a different 4350 * RX function 4351 */ 4352 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 4353 DPAA2_SEC_DEBUG("Device already init by primary process"); 4354 return 0; 4355 } 4356 4357 /* Initialize security_ctx only for primary process*/ 4358 security_instance = rte_malloc("rte_security_instances_ops", 4359 sizeof(struct rte_security_ctx), 0); 4360 if (security_instance == NULL) 4361 return -ENOMEM; 4362 security_instance->device = (void *)cryptodev; 4363 security_instance->ops = &dpaa2_sec_security_ops; 4364 security_instance->sess_cnt = 0; 4365 cryptodev->security_ctx = security_instance; 4366 4367 /*Open the rte device via MC and save the handle for further use*/ 4368 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 4369 sizeof(struct fsl_mc_io), 0); 4370 if (!dpseci) { 4371 DPAA2_SEC_ERR( 4372 "Error in allocating the memory for dpsec object"); 4373 return -ENOMEM; 4374 } 4375 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 4376 4377 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 4378 if (retcode != 0) { 4379 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 4380 retcode); 4381 goto init_error; 4382 } 4383 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 4384 if (retcode != 0) { 4385 DPAA2_SEC_ERR( 4386 "Cannot get dpsec device attributed: Error = %x", 4387 retcode); 4388 goto init_error; 4389 } 4390 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 4391 "dpsec-%u", hw_id); 4392 4393 internals->max_nb_queue_pairs = attr.num_tx_queues; 4394 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 4395 internals->hw = dpseci; 4396 internals->token = token; 4397 internals->en_loose_ordered = true; 4398 4399 dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE); 4400 dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER); 4401 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 4402 return 0; 4403 4404 init_error: 4405 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 4406 4407 /* dpaa2_sec_uninit(crypto_dev_name); */ 4408 return -EFAULT; 4409 } 4410 4411 static int 4412 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 4413 struct rte_dpaa2_device *dpaa2_dev) 4414 { 4415 struct rte_cryptodev *cryptodev; 4416 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 4417 4418 int retval; 4419 4420 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 4421 dpaa2_dev->object_id); 4422 4423 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 4424 if (cryptodev == NULL) 4425 return -ENOMEM; 4426 4427 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4428 cryptodev->data->dev_private = rte_zmalloc_socket( 4429 "cryptodev private structure", 4430 sizeof(struct dpaa2_sec_dev_private), 4431 RTE_CACHE_LINE_SIZE, 4432 rte_socket_id()); 4433 4434 if (cryptodev->data->dev_private == NULL) 4435 rte_panic("Cannot allocate memzone for private " 4436 "device data"); 4437 } 4438 4439 dpaa2_dev->cryptodev = cryptodev; 4440 cryptodev->device = &dpaa2_dev->device; 4441 4442 /* init user callbacks */ 4443 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 4444 4445 if (dpaa2_svr_family == SVR_LX2160A) 4446 rta_set_sec_era(RTA_SEC_ERA_10); 4447 else 4448 rta_set_sec_era(RTA_SEC_ERA_8); 4449 4450 DPAA2_SEC_INFO("2-SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era())); 4451 4452 /* Invoke PMD device initialization function */ 4453 retval = dpaa2_sec_dev_init(cryptodev); 4454 if (retval == 0) { 4455 rte_cryptodev_pmd_probing_finish(cryptodev); 4456 return 0; 4457 } 4458 4459 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4460 rte_free(cryptodev->data->dev_private); 4461 4462 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 4463 4464 return -ENXIO; 4465 } 4466 4467 static int 4468 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 4469 { 4470 struct rte_cryptodev *cryptodev; 4471 int ret; 4472 4473 cryptodev = dpaa2_dev->cryptodev; 4474 if (cryptodev == NULL) 4475 return -ENODEV; 4476 4477 ret = dpaa2_sec_uninit(cryptodev); 4478 if (ret) 4479 return ret; 4480 4481 return rte_cryptodev_pmd_destroy(cryptodev); 4482 } 4483 4484 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 4485 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 4486 .drv_type = DPAA2_CRYPTO, 4487 .driver = { 4488 .name = "DPAA2 SEC PMD" 4489 }, 4490 .probe = cryptodev_dpaa2_sec_probe, 4491 .remove = cryptodev_dpaa2_sec_remove, 4492 }; 4493 4494 static struct cryptodev_driver dpaa2_sec_crypto_drv; 4495 4496 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 4497 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 4498 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 4499 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD, 4500 DRIVER_STRICT_ORDER "=<int>" 4501 DRIVER_DUMP_MODE "=<int>"); 4502 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE); 4503