1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2021 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_mbuf.h> 14 #include <rte_cryptodev.h> 15 #include <rte_malloc.h> 16 #include <rte_memcpy.h> 17 #include <rte_string_fns.h> 18 #include <rte_cycles.h> 19 #include <rte_kvargs.h> 20 #include <rte_dev.h> 21 #include <cryptodev_pmd.h> 22 #include <rte_common.h> 23 #include <rte_fslmc.h> 24 #include <fslmc_vfio.h> 25 #include <dpaa2_hw_pvt.h> 26 #include <dpaa2_hw_dpio.h> 27 #include <dpaa2_hw_mempool.h> 28 #include <fsl_dpopr.h> 29 #include <fsl_dpseci.h> 30 #include <fsl_mc_sys.h> 31 32 #include "dpaa2_sec_priv.h" 33 #include "dpaa2_sec_event.h" 34 #include "dpaa2_sec_logs.h" 35 36 /* RTA header files */ 37 #include <desc/ipsec.h> 38 #include <desc/pdcp.h> 39 #include <desc/sdap.h> 40 #include <desc/algo.h> 41 42 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 43 * a pointer to the shared descriptor 44 */ 45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 46 #define FSL_VENDOR_ID 0x1957 47 #define FSL_DEVICE_ID 0x410 48 #define FSL_SUBSYSTEM_SEC 1 49 #define FSL_MC_DPSECI_DEVID 3 50 51 #define NO_PREFETCH 0 52 53 uint8_t cryptodev_driver_id; 54 55 #ifdef RTE_LIB_SECURITY 56 static inline int 57 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 58 struct rte_crypto_op *op, 59 struct qbman_fd *fd, uint16_t bpid) 60 { 61 struct rte_crypto_sym_op *sym_op = op->sym; 62 struct ctxt_priv *priv = sess->ctxt; 63 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 64 struct sec_flow_context *flc; 65 struct rte_mbuf *mbuf; 66 uint32_t in_len = 0, out_len = 0; 67 68 if (sym_op->m_dst) 69 mbuf = sym_op->m_dst; 70 else 71 mbuf = sym_op->m_src; 72 73 /* first FLE entry used to store mbuf and session ctxt */ 74 fle = (struct qbman_fle *)rte_malloc(NULL, 75 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 76 RTE_CACHE_LINE_SIZE); 77 if (unlikely(!fle)) { 78 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 79 return -ENOMEM; 80 } 81 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 82 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 83 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 84 85 /* Save the shared descriptor */ 86 flc = &priv->flc_desc[0].flc; 87 88 op_fle = fle + 1; 89 ip_fle = fle + 2; 90 sge = fle + 3; 91 92 if (likely(bpid < MAX_BPID)) { 93 DPAA2_SET_FD_BPID(fd, bpid); 94 DPAA2_SET_FLE_BPID(op_fle, bpid); 95 DPAA2_SET_FLE_BPID(ip_fle, bpid); 96 } else { 97 DPAA2_SET_FD_IVP(fd); 98 DPAA2_SET_FLE_IVP(op_fle); 99 DPAA2_SET_FLE_IVP(ip_fle); 100 } 101 102 /* Configure FD as a FRAME LIST */ 103 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 104 DPAA2_SET_FD_COMPOUND_FMT(fd); 105 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 106 107 /* Configure Output FLE with Scatter/Gather Entry */ 108 DPAA2_SET_FLE_SG_EXT(op_fle); 109 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 110 111 /* Configure Output SGE for Encap/Decap */ 112 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 113 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 114 /* o/p segs */ 115 while (mbuf->next) { 116 sge->length = mbuf->data_len; 117 out_len += sge->length; 118 sge++; 119 mbuf = mbuf->next; 120 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 121 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 122 } 123 /* using buf_len for last buf - so that extra data can be added */ 124 sge->length = mbuf->buf_len - mbuf->data_off; 125 out_len += sge->length; 126 127 DPAA2_SET_FLE_FIN(sge); 128 op_fle->length = out_len; 129 130 sge++; 131 mbuf = sym_op->m_src; 132 133 /* Configure Input FLE with Scatter/Gather Entry */ 134 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 135 DPAA2_SET_FLE_SG_EXT(ip_fle); 136 DPAA2_SET_FLE_FIN(ip_fle); 137 138 /* Configure input SGE for Encap/Decap */ 139 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 140 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 141 sge->length = mbuf->data_len; 142 in_len += sge->length; 143 144 mbuf = mbuf->next; 145 /* i/p segs */ 146 while (mbuf) { 147 sge++; 148 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 149 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 150 sge->length = mbuf->data_len; 151 in_len += sge->length; 152 mbuf = mbuf->next; 153 } 154 ip_fle->length = in_len; 155 DPAA2_SET_FLE_FIN(sge); 156 157 /* In case of PDCP, per packet HFN is stored in 158 * mbuf priv after sym_op. 159 */ 160 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 161 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 162 sess->pdcp.hfn_ovd_offset); 163 /*enable HFN override override */ 164 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 165 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 166 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 167 } 168 DPAA2_SET_FD_LEN(fd, ip_fle->length); 169 170 return 0; 171 } 172 173 static inline int 174 build_proto_compound_fd(dpaa2_sec_session *sess, 175 struct rte_crypto_op *op, 176 struct qbman_fd *fd, uint16_t bpid) 177 { 178 struct rte_crypto_sym_op *sym_op = op->sym; 179 struct ctxt_priv *priv = sess->ctxt; 180 struct qbman_fle *fle, *ip_fle, *op_fle; 181 struct sec_flow_context *flc; 182 struct rte_mbuf *src_mbuf = sym_op->m_src; 183 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 184 int retval; 185 186 if (!dst_mbuf) 187 dst_mbuf = src_mbuf; 188 189 /* Save the shared descriptor */ 190 flc = &priv->flc_desc[0].flc; 191 192 /* we are using the first FLE entry to store Mbuf */ 193 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 194 if (retval) { 195 DPAA2_SEC_DP_ERR("Memory alloc failed"); 196 return -ENOMEM; 197 } 198 memset(fle, 0, FLE_POOL_BUF_SIZE); 199 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 200 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 201 202 op_fle = fle + 1; 203 ip_fle = fle + 2; 204 205 if (likely(bpid < MAX_BPID)) { 206 DPAA2_SET_FD_BPID(fd, bpid); 207 DPAA2_SET_FLE_BPID(op_fle, bpid); 208 DPAA2_SET_FLE_BPID(ip_fle, bpid); 209 } else { 210 DPAA2_SET_FD_IVP(fd); 211 DPAA2_SET_FLE_IVP(op_fle); 212 DPAA2_SET_FLE_IVP(ip_fle); 213 } 214 215 /* Configure FD as a FRAME LIST */ 216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 217 DPAA2_SET_FD_COMPOUND_FMT(fd); 218 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 219 220 /* Configure Output FLE with dst mbuf data */ 221 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 222 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 223 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 224 225 /* Configure Input FLE with src mbuf data */ 226 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 227 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 228 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 229 230 DPAA2_SET_FD_LEN(fd, ip_fle->length); 231 DPAA2_SET_FLE_FIN(ip_fle); 232 233 /* In case of PDCP, per packet HFN is stored in 234 * mbuf priv after sym_op. 235 */ 236 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 237 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 238 sess->pdcp.hfn_ovd_offset); 239 /*enable HFN override override */ 240 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 241 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 242 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 243 } 244 245 return 0; 246 247 } 248 249 static inline int 250 build_proto_fd(dpaa2_sec_session *sess, 251 struct rte_crypto_op *op, 252 struct qbman_fd *fd, uint16_t bpid) 253 { 254 struct rte_crypto_sym_op *sym_op = op->sym; 255 if (sym_op->m_dst) 256 return build_proto_compound_fd(sess, op, fd, bpid); 257 258 struct ctxt_priv *priv = sess->ctxt; 259 struct sec_flow_context *flc; 260 struct rte_mbuf *mbuf = sym_op->m_src; 261 262 if (likely(bpid < MAX_BPID)) 263 DPAA2_SET_FD_BPID(fd, bpid); 264 else 265 DPAA2_SET_FD_IVP(fd); 266 267 /* Save the shared descriptor */ 268 flc = &priv->flc_desc[0].flc; 269 270 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 271 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 272 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 273 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 274 275 /* save physical address of mbuf */ 276 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 277 mbuf->buf_iova = (size_t)op; 278 279 return 0; 280 } 281 #endif 282 283 static inline int 284 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 285 struct rte_crypto_op *op, 286 struct qbman_fd *fd, __rte_unused uint16_t bpid) 287 { 288 struct rte_crypto_sym_op *sym_op = op->sym; 289 struct ctxt_priv *priv = sess->ctxt; 290 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 291 struct sec_flow_context *flc; 292 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 293 int icv_len = sess->digest_length; 294 uint8_t *old_icv; 295 struct rte_mbuf *mbuf; 296 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 297 sess->iv.offset); 298 299 if (sym_op->m_dst) 300 mbuf = sym_op->m_dst; 301 else 302 mbuf = sym_op->m_src; 303 304 /* first FLE entry used to store mbuf and session ctxt */ 305 fle = (struct qbman_fle *)rte_malloc(NULL, 306 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 307 RTE_CACHE_LINE_SIZE); 308 if (unlikely(!fle)) { 309 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 310 return -ENOMEM; 311 } 312 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 313 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 314 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 315 316 op_fle = fle + 1; 317 ip_fle = fle + 2; 318 sge = fle + 3; 319 320 /* Save the shared descriptor */ 321 flc = &priv->flc_desc[0].flc; 322 323 /* Configure FD as a FRAME LIST */ 324 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 325 DPAA2_SET_FD_COMPOUND_FMT(fd); 326 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 327 328 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 329 "iv-len=%d data_off: 0x%x\n", 330 sym_op->aead.data.offset, 331 sym_op->aead.data.length, 332 sess->digest_length, 333 sess->iv.length, 334 sym_op->m_src->data_off); 335 336 /* Configure Output FLE with Scatter/Gather Entry */ 337 DPAA2_SET_FLE_SG_EXT(op_fle); 338 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 339 340 if (auth_only_len) 341 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 342 343 op_fle->length = (sess->dir == DIR_ENC) ? 344 (sym_op->aead.data.length + icv_len) : 345 sym_op->aead.data.length; 346 347 /* Configure Output SGE for Encap/Decap */ 348 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 349 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset); 350 sge->length = mbuf->data_len - sym_op->aead.data.offset; 351 352 mbuf = mbuf->next; 353 /* o/p segs */ 354 while (mbuf) { 355 sge++; 356 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 357 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 358 sge->length = mbuf->data_len; 359 mbuf = mbuf->next; 360 } 361 sge->length -= icv_len; 362 363 if (sess->dir == DIR_ENC) { 364 sge++; 365 DPAA2_SET_FLE_ADDR(sge, 366 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 367 sge->length = icv_len; 368 } 369 DPAA2_SET_FLE_FIN(sge); 370 371 sge++; 372 mbuf = sym_op->m_src; 373 374 /* Configure Input FLE with Scatter/Gather Entry */ 375 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 376 DPAA2_SET_FLE_SG_EXT(ip_fle); 377 DPAA2_SET_FLE_FIN(ip_fle); 378 ip_fle->length = (sess->dir == DIR_ENC) ? 379 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 380 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 381 icv_len); 382 383 /* Configure Input SGE for Encap/Decap */ 384 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 385 sge->length = sess->iv.length; 386 387 sge++; 388 if (auth_only_len) { 389 DPAA2_SET_FLE_ADDR(sge, 390 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 391 sge->length = auth_only_len; 392 sge++; 393 } 394 395 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 396 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 397 mbuf->data_off); 398 sge->length = mbuf->data_len - sym_op->aead.data.offset; 399 400 mbuf = mbuf->next; 401 /* i/p segs */ 402 while (mbuf) { 403 sge++; 404 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 405 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 406 sge->length = mbuf->data_len; 407 mbuf = mbuf->next; 408 } 409 410 if (sess->dir == DIR_DEC) { 411 sge++; 412 old_icv = (uint8_t *)(sge + 1); 413 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 414 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 415 sge->length = icv_len; 416 } 417 418 DPAA2_SET_FLE_FIN(sge); 419 if (auth_only_len) { 420 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 421 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 422 } 423 DPAA2_SET_FD_LEN(fd, ip_fle->length); 424 425 return 0; 426 } 427 428 static inline int 429 build_authenc_gcm_fd(dpaa2_sec_session *sess, 430 struct rte_crypto_op *op, 431 struct qbman_fd *fd, uint16_t bpid) 432 { 433 struct rte_crypto_sym_op *sym_op = op->sym; 434 struct ctxt_priv *priv = sess->ctxt; 435 struct qbman_fle *fle, *sge; 436 struct sec_flow_context *flc; 437 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 438 int icv_len = sess->digest_length, retval; 439 uint8_t *old_icv; 440 struct rte_mbuf *dst; 441 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 442 sess->iv.offset); 443 444 if (sym_op->m_dst) 445 dst = sym_op->m_dst; 446 else 447 dst = sym_op->m_src; 448 449 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 450 * Currently we donot know which FLE has the mbuf stored. 451 * So while retreiving we can go back 1 FLE from the FD -ADDR 452 * to get the MBUF Addr from the previous FLE. 453 * We can have a better approach to use the inline Mbuf 454 */ 455 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 456 if (retval) { 457 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 458 return -ENOMEM; 459 } 460 memset(fle, 0, FLE_POOL_BUF_SIZE); 461 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 462 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 463 fle = fle + 1; 464 sge = fle + 2; 465 if (likely(bpid < MAX_BPID)) { 466 DPAA2_SET_FD_BPID(fd, bpid); 467 DPAA2_SET_FLE_BPID(fle, bpid); 468 DPAA2_SET_FLE_BPID(fle + 1, bpid); 469 DPAA2_SET_FLE_BPID(sge, bpid); 470 DPAA2_SET_FLE_BPID(sge + 1, bpid); 471 DPAA2_SET_FLE_BPID(sge + 2, bpid); 472 DPAA2_SET_FLE_BPID(sge + 3, bpid); 473 } else { 474 DPAA2_SET_FD_IVP(fd); 475 DPAA2_SET_FLE_IVP(fle); 476 DPAA2_SET_FLE_IVP((fle + 1)); 477 DPAA2_SET_FLE_IVP(sge); 478 DPAA2_SET_FLE_IVP((sge + 1)); 479 DPAA2_SET_FLE_IVP((sge + 2)); 480 DPAA2_SET_FLE_IVP((sge + 3)); 481 } 482 483 /* Save the shared descriptor */ 484 flc = &priv->flc_desc[0].flc; 485 /* Configure FD as a FRAME LIST */ 486 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 487 DPAA2_SET_FD_COMPOUND_FMT(fd); 488 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 489 490 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 491 "iv-len=%d data_off: 0x%x\n", 492 sym_op->aead.data.offset, 493 sym_op->aead.data.length, 494 sess->digest_length, 495 sess->iv.length, 496 sym_op->m_src->data_off); 497 498 /* Configure Output FLE with Scatter/Gather Entry */ 499 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 500 if (auth_only_len) 501 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 502 fle->length = (sess->dir == DIR_ENC) ? 503 (sym_op->aead.data.length + icv_len) : 504 sym_op->aead.data.length; 505 506 DPAA2_SET_FLE_SG_EXT(fle); 507 508 /* Configure Output SGE for Encap/Decap */ 509 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 510 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset); 511 sge->length = sym_op->aead.data.length; 512 513 if (sess->dir == DIR_ENC) { 514 sge++; 515 DPAA2_SET_FLE_ADDR(sge, 516 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 517 sge->length = sess->digest_length; 518 } 519 DPAA2_SET_FLE_FIN(sge); 520 521 sge++; 522 fle++; 523 524 /* Configure Input FLE with Scatter/Gather Entry */ 525 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 526 DPAA2_SET_FLE_SG_EXT(fle); 527 DPAA2_SET_FLE_FIN(fle); 528 fle->length = (sess->dir == DIR_ENC) ? 529 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 530 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 531 sess->digest_length); 532 533 /* Configure Input SGE for Encap/Decap */ 534 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 535 sge->length = sess->iv.length; 536 sge++; 537 if (auth_only_len) { 538 DPAA2_SET_FLE_ADDR(sge, 539 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 540 sge->length = auth_only_len; 541 DPAA2_SET_FLE_BPID(sge, bpid); 542 sge++; 543 } 544 545 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 546 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 547 sym_op->m_src->data_off); 548 sge->length = sym_op->aead.data.length; 549 if (sess->dir == DIR_DEC) { 550 sge++; 551 old_icv = (uint8_t *)(sge + 1); 552 memcpy(old_icv, sym_op->aead.digest.data, 553 sess->digest_length); 554 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 555 sge->length = sess->digest_length; 556 } 557 DPAA2_SET_FLE_FIN(sge); 558 559 if (auth_only_len) { 560 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 561 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 562 } 563 564 DPAA2_SET_FD_LEN(fd, fle->length); 565 return 0; 566 } 567 568 static inline int 569 build_authenc_sg_fd(dpaa2_sec_session *sess, 570 struct rte_crypto_op *op, 571 struct qbman_fd *fd, __rte_unused uint16_t bpid) 572 { 573 struct rte_crypto_sym_op *sym_op = op->sym; 574 struct ctxt_priv *priv = sess->ctxt; 575 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 576 struct sec_flow_context *flc; 577 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 578 sym_op->auth.data.offset; 579 uint16_t auth_tail_len = sym_op->auth.data.length - 580 sym_op->cipher.data.length - auth_hdr_len; 581 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 582 int icv_len = sess->digest_length; 583 uint8_t *old_icv; 584 struct rte_mbuf *mbuf; 585 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 586 sess->iv.offset); 587 588 if (sym_op->m_dst) 589 mbuf = sym_op->m_dst; 590 else 591 mbuf = sym_op->m_src; 592 593 /* first FLE entry used to store mbuf and session ctxt */ 594 fle = (struct qbman_fle *)rte_malloc(NULL, 595 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 596 RTE_CACHE_LINE_SIZE); 597 if (unlikely(!fle)) { 598 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 599 return -ENOMEM; 600 } 601 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 602 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 603 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 604 605 op_fle = fle + 1; 606 ip_fle = fle + 2; 607 sge = fle + 3; 608 609 /* Save the shared descriptor */ 610 flc = &priv->flc_desc[0].flc; 611 612 /* Configure FD as a FRAME LIST */ 613 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 614 DPAA2_SET_FD_COMPOUND_FMT(fd); 615 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 616 617 DPAA2_SEC_DP_DEBUG( 618 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 619 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 620 sym_op->auth.data.offset, 621 sym_op->auth.data.length, 622 sess->digest_length, 623 sym_op->cipher.data.offset, 624 sym_op->cipher.data.length, 625 sess->iv.length, 626 sym_op->m_src->data_off); 627 628 /* Configure Output FLE with Scatter/Gather Entry */ 629 DPAA2_SET_FLE_SG_EXT(op_fle); 630 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 631 632 if (auth_only_len) 633 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 634 635 op_fle->length = (sess->dir == DIR_ENC) ? 636 (sym_op->cipher.data.length + icv_len) : 637 sym_op->cipher.data.length; 638 639 /* Configure Output SGE for Encap/Decap */ 640 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 641 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 642 sge->length = mbuf->data_len - sym_op->auth.data.offset; 643 644 mbuf = mbuf->next; 645 /* o/p segs */ 646 while (mbuf) { 647 sge++; 648 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 649 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 650 sge->length = mbuf->data_len; 651 mbuf = mbuf->next; 652 } 653 sge->length -= icv_len; 654 655 if (sess->dir == DIR_ENC) { 656 sge++; 657 DPAA2_SET_FLE_ADDR(sge, 658 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 659 sge->length = icv_len; 660 } 661 DPAA2_SET_FLE_FIN(sge); 662 663 sge++; 664 mbuf = sym_op->m_src; 665 666 /* Configure Input FLE with Scatter/Gather Entry */ 667 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 668 DPAA2_SET_FLE_SG_EXT(ip_fle); 669 DPAA2_SET_FLE_FIN(ip_fle); 670 ip_fle->length = (sess->dir == DIR_ENC) ? 671 (sym_op->auth.data.length + sess->iv.length) : 672 (sym_op->auth.data.length + sess->iv.length + 673 icv_len); 674 675 /* Configure Input SGE for Encap/Decap */ 676 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 677 sge->length = sess->iv.length; 678 679 sge++; 680 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 681 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 682 mbuf->data_off); 683 sge->length = mbuf->data_len - sym_op->auth.data.offset; 684 685 mbuf = mbuf->next; 686 /* i/p segs */ 687 while (mbuf) { 688 sge++; 689 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 690 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 691 sge->length = mbuf->data_len; 692 mbuf = mbuf->next; 693 } 694 sge->length -= icv_len; 695 696 if (sess->dir == DIR_DEC) { 697 sge++; 698 old_icv = (uint8_t *)(sge + 1); 699 memcpy(old_icv, sym_op->auth.digest.data, 700 icv_len); 701 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 702 sge->length = icv_len; 703 } 704 705 DPAA2_SET_FLE_FIN(sge); 706 if (auth_only_len) { 707 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 708 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 709 } 710 DPAA2_SET_FD_LEN(fd, ip_fle->length); 711 712 return 0; 713 } 714 715 static inline int 716 build_authenc_fd(dpaa2_sec_session *sess, 717 struct rte_crypto_op *op, 718 struct qbman_fd *fd, uint16_t bpid) 719 { 720 struct rte_crypto_sym_op *sym_op = op->sym; 721 struct ctxt_priv *priv = sess->ctxt; 722 struct qbman_fle *fle, *sge; 723 struct sec_flow_context *flc; 724 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 725 sym_op->auth.data.offset; 726 uint16_t auth_tail_len = sym_op->auth.data.length - 727 sym_op->cipher.data.length - auth_hdr_len; 728 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 729 730 int icv_len = sess->digest_length, retval; 731 uint8_t *old_icv; 732 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 733 sess->iv.offset); 734 struct rte_mbuf *dst; 735 736 if (sym_op->m_dst) 737 dst = sym_op->m_dst; 738 else 739 dst = sym_op->m_src; 740 741 /* we are using the first FLE entry to store Mbuf. 742 * Currently we donot know which FLE has the mbuf stored. 743 * So while retreiving we can go back 1 FLE from the FD -ADDR 744 * to get the MBUF Addr from the previous FLE. 745 * We can have a better approach to use the inline Mbuf 746 */ 747 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 748 if (retval) { 749 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 750 return -ENOMEM; 751 } 752 memset(fle, 0, FLE_POOL_BUF_SIZE); 753 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 754 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 755 fle = fle + 1; 756 sge = fle + 2; 757 if (likely(bpid < MAX_BPID)) { 758 DPAA2_SET_FD_BPID(fd, bpid); 759 DPAA2_SET_FLE_BPID(fle, bpid); 760 DPAA2_SET_FLE_BPID(fle + 1, bpid); 761 DPAA2_SET_FLE_BPID(sge, bpid); 762 DPAA2_SET_FLE_BPID(sge + 1, bpid); 763 DPAA2_SET_FLE_BPID(sge + 2, bpid); 764 DPAA2_SET_FLE_BPID(sge + 3, bpid); 765 } else { 766 DPAA2_SET_FD_IVP(fd); 767 DPAA2_SET_FLE_IVP(fle); 768 DPAA2_SET_FLE_IVP((fle + 1)); 769 DPAA2_SET_FLE_IVP(sge); 770 DPAA2_SET_FLE_IVP((sge + 1)); 771 DPAA2_SET_FLE_IVP((sge + 2)); 772 DPAA2_SET_FLE_IVP((sge + 3)); 773 } 774 775 /* Save the shared descriptor */ 776 flc = &priv->flc_desc[0].flc; 777 /* Configure FD as a FRAME LIST */ 778 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 779 DPAA2_SET_FD_COMPOUND_FMT(fd); 780 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 781 782 DPAA2_SEC_DP_DEBUG( 783 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 784 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 785 sym_op->auth.data.offset, 786 sym_op->auth.data.length, 787 sess->digest_length, 788 sym_op->cipher.data.offset, 789 sym_op->cipher.data.length, 790 sess->iv.length, 791 sym_op->m_src->data_off); 792 793 /* Configure Output FLE with Scatter/Gather Entry */ 794 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 795 if (auth_only_len) 796 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 797 fle->length = (sess->dir == DIR_ENC) ? 798 (sym_op->cipher.data.length + icv_len) : 799 sym_op->cipher.data.length; 800 801 DPAA2_SET_FLE_SG_EXT(fle); 802 803 /* Configure Output SGE for Encap/Decap */ 804 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 805 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 806 dst->data_off); 807 sge->length = sym_op->cipher.data.length; 808 809 if (sess->dir == DIR_ENC) { 810 sge++; 811 DPAA2_SET_FLE_ADDR(sge, 812 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 813 sge->length = sess->digest_length; 814 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 815 sess->iv.length)); 816 } 817 DPAA2_SET_FLE_FIN(sge); 818 819 sge++; 820 fle++; 821 822 /* Configure Input FLE with Scatter/Gather Entry */ 823 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 824 DPAA2_SET_FLE_SG_EXT(fle); 825 DPAA2_SET_FLE_FIN(fle); 826 fle->length = (sess->dir == DIR_ENC) ? 827 (sym_op->auth.data.length + sess->iv.length) : 828 (sym_op->auth.data.length + sess->iv.length + 829 sess->digest_length); 830 831 /* Configure Input SGE for Encap/Decap */ 832 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 833 sge->length = sess->iv.length; 834 sge++; 835 836 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 837 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 838 sym_op->m_src->data_off); 839 sge->length = sym_op->auth.data.length; 840 if (sess->dir == DIR_DEC) { 841 sge++; 842 old_icv = (uint8_t *)(sge + 1); 843 memcpy(old_icv, sym_op->auth.digest.data, 844 sess->digest_length); 845 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 846 sge->length = sess->digest_length; 847 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 848 sess->digest_length + 849 sess->iv.length)); 850 } 851 DPAA2_SET_FLE_FIN(sge); 852 if (auth_only_len) { 853 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 854 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 855 } 856 return 0; 857 } 858 859 static inline int build_auth_sg_fd( 860 dpaa2_sec_session *sess, 861 struct rte_crypto_op *op, 862 struct qbman_fd *fd, 863 __rte_unused uint16_t bpid) 864 { 865 struct rte_crypto_sym_op *sym_op = op->sym; 866 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 867 struct sec_flow_context *flc; 868 struct ctxt_priv *priv = sess->ctxt; 869 int data_len, data_offset; 870 uint8_t *old_digest; 871 struct rte_mbuf *mbuf; 872 873 data_len = sym_op->auth.data.length; 874 data_offset = sym_op->auth.data.offset; 875 876 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 877 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 878 if ((data_len & 7) || (data_offset & 7)) { 879 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 880 return -ENOTSUP; 881 } 882 883 data_len = data_len >> 3; 884 data_offset = data_offset >> 3; 885 } 886 887 mbuf = sym_op->m_src; 888 fle = (struct qbman_fle *)rte_malloc(NULL, 889 FLE_SG_MEM_SIZE(mbuf->nb_segs), 890 RTE_CACHE_LINE_SIZE); 891 if (unlikely(!fle)) { 892 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 893 return -ENOMEM; 894 } 895 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 896 /* first FLE entry used to store mbuf and session ctxt */ 897 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 898 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 899 op_fle = fle + 1; 900 ip_fle = fle + 2; 901 sge = fle + 3; 902 903 flc = &priv->flc_desc[DESC_INITFINAL].flc; 904 /* sg FD */ 905 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 906 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 907 DPAA2_SET_FD_COMPOUND_FMT(fd); 908 909 /* o/p fle */ 910 DPAA2_SET_FLE_ADDR(op_fle, 911 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 912 op_fle->length = sess->digest_length; 913 914 /* i/p fle */ 915 DPAA2_SET_FLE_SG_EXT(ip_fle); 916 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 917 ip_fle->length = data_len; 918 919 if (sess->iv.length) { 920 uint8_t *iv_ptr; 921 922 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 923 sess->iv.offset); 924 925 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 926 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 927 sge->length = 12; 928 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 929 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 930 sge->length = 8; 931 } else { 932 sge->length = sess->iv.length; 933 } 934 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 935 ip_fle->length += sge->length; 936 sge++; 937 } 938 /* i/p 1st seg */ 939 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 940 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 941 942 if (data_len <= (mbuf->data_len - data_offset)) { 943 sge->length = data_len; 944 data_len = 0; 945 } else { 946 sge->length = mbuf->data_len - data_offset; 947 948 /* remaining i/p segs */ 949 while ((data_len = data_len - sge->length) && 950 (mbuf = mbuf->next)) { 951 sge++; 952 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 953 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 954 if (data_len > mbuf->data_len) 955 sge->length = mbuf->data_len; 956 else 957 sge->length = data_len; 958 } 959 } 960 961 if (sess->dir == DIR_DEC) { 962 /* Digest verification case */ 963 sge++; 964 old_digest = (uint8_t *)(sge + 1); 965 rte_memcpy(old_digest, sym_op->auth.digest.data, 966 sess->digest_length); 967 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 968 sge->length = sess->digest_length; 969 ip_fle->length += sess->digest_length; 970 } 971 DPAA2_SET_FLE_FIN(sge); 972 DPAA2_SET_FLE_FIN(ip_fle); 973 DPAA2_SET_FD_LEN(fd, ip_fle->length); 974 975 return 0; 976 } 977 978 static inline int 979 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 980 struct qbman_fd *fd, uint16_t bpid) 981 { 982 struct rte_crypto_sym_op *sym_op = op->sym; 983 struct qbman_fle *fle, *sge; 984 struct sec_flow_context *flc; 985 struct ctxt_priv *priv = sess->ctxt; 986 int data_len, data_offset; 987 uint8_t *old_digest; 988 int retval; 989 990 data_len = sym_op->auth.data.length; 991 data_offset = sym_op->auth.data.offset; 992 993 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 994 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 995 if ((data_len & 7) || (data_offset & 7)) { 996 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 997 return -ENOTSUP; 998 } 999 1000 data_len = data_len >> 3; 1001 data_offset = data_offset >> 3; 1002 } 1003 1004 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1005 if (retval) { 1006 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 1007 return -ENOMEM; 1008 } 1009 memset(fle, 0, FLE_POOL_BUF_SIZE); 1010 /* TODO we are using the first FLE entry to store Mbuf. 1011 * Currently we donot know which FLE has the mbuf stored. 1012 * So while retreiving we can go back 1 FLE from the FD -ADDR 1013 * to get the MBUF Addr from the previous FLE. 1014 * We can have a better approach to use the inline Mbuf 1015 */ 1016 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1017 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1018 fle = fle + 1; 1019 sge = fle + 2; 1020 1021 if (likely(bpid < MAX_BPID)) { 1022 DPAA2_SET_FD_BPID(fd, bpid); 1023 DPAA2_SET_FLE_BPID(fle, bpid); 1024 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1025 DPAA2_SET_FLE_BPID(sge, bpid); 1026 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1027 } else { 1028 DPAA2_SET_FD_IVP(fd); 1029 DPAA2_SET_FLE_IVP(fle); 1030 DPAA2_SET_FLE_IVP((fle + 1)); 1031 DPAA2_SET_FLE_IVP(sge); 1032 DPAA2_SET_FLE_IVP((sge + 1)); 1033 } 1034 1035 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1036 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1037 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1038 DPAA2_SET_FD_COMPOUND_FMT(fd); 1039 1040 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1041 fle->length = sess->digest_length; 1042 fle++; 1043 1044 /* Setting input FLE */ 1045 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1046 DPAA2_SET_FLE_SG_EXT(fle); 1047 fle->length = data_len; 1048 1049 if (sess->iv.length) { 1050 uint8_t *iv_ptr; 1051 1052 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1053 sess->iv.offset); 1054 1055 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1056 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1057 sge->length = 12; 1058 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1059 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1060 sge->length = 8; 1061 } else { 1062 sge->length = sess->iv.length; 1063 } 1064 1065 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1066 fle->length = fle->length + sge->length; 1067 sge++; 1068 } 1069 1070 /* Setting data to authenticate */ 1071 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1072 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1073 sge->length = data_len; 1074 1075 if (sess->dir == DIR_DEC) { 1076 sge++; 1077 old_digest = (uint8_t *)(sge + 1); 1078 rte_memcpy(old_digest, sym_op->auth.digest.data, 1079 sess->digest_length); 1080 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1081 sge->length = sess->digest_length; 1082 fle->length = fle->length + sess->digest_length; 1083 } 1084 1085 DPAA2_SET_FLE_FIN(sge); 1086 DPAA2_SET_FLE_FIN(fle); 1087 DPAA2_SET_FD_LEN(fd, fle->length); 1088 1089 return 0; 1090 } 1091 1092 static int 1093 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1094 struct qbman_fd *fd, __rte_unused uint16_t bpid) 1095 { 1096 struct rte_crypto_sym_op *sym_op = op->sym; 1097 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1098 int data_len, data_offset; 1099 struct sec_flow_context *flc; 1100 struct ctxt_priv *priv = sess->ctxt; 1101 struct rte_mbuf *mbuf; 1102 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1103 sess->iv.offset); 1104 1105 data_len = sym_op->cipher.data.length; 1106 data_offset = sym_op->cipher.data.offset; 1107 1108 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1109 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1110 if ((data_len & 7) || (data_offset & 7)) { 1111 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1112 return -ENOTSUP; 1113 } 1114 1115 data_len = data_len >> 3; 1116 data_offset = data_offset >> 3; 1117 } 1118 1119 if (sym_op->m_dst) 1120 mbuf = sym_op->m_dst; 1121 else 1122 mbuf = sym_op->m_src; 1123 1124 /* first FLE entry used to store mbuf and session ctxt */ 1125 fle = (struct qbman_fle *)rte_malloc(NULL, 1126 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1127 RTE_CACHE_LINE_SIZE); 1128 if (!fle) { 1129 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1130 return -ENOMEM; 1131 } 1132 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1133 /* first FLE entry used to store mbuf and session ctxt */ 1134 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1135 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1136 1137 op_fle = fle + 1; 1138 ip_fle = fle + 2; 1139 sge = fle + 3; 1140 1141 flc = &priv->flc_desc[0].flc; 1142 1143 DPAA2_SEC_DP_DEBUG( 1144 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1145 " data_off: 0x%x\n", 1146 data_offset, 1147 data_len, 1148 sess->iv.length, 1149 sym_op->m_src->data_off); 1150 1151 /* o/p fle */ 1152 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1153 op_fle->length = data_len; 1154 DPAA2_SET_FLE_SG_EXT(op_fle); 1155 1156 /* o/p 1st seg */ 1157 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1158 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1159 sge->length = mbuf->data_len - data_offset; 1160 1161 mbuf = mbuf->next; 1162 /* o/p segs */ 1163 while (mbuf) { 1164 sge++; 1165 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1166 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1167 sge->length = mbuf->data_len; 1168 mbuf = mbuf->next; 1169 } 1170 DPAA2_SET_FLE_FIN(sge); 1171 1172 DPAA2_SEC_DP_DEBUG( 1173 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 1174 flc, fle, fle->addr_hi, fle->addr_lo, 1175 fle->length); 1176 1177 /* i/p fle */ 1178 mbuf = sym_op->m_src; 1179 sge++; 1180 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1181 ip_fle->length = sess->iv.length + data_len; 1182 DPAA2_SET_FLE_SG_EXT(ip_fle); 1183 1184 /* i/p IV */ 1185 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1186 DPAA2_SET_FLE_OFFSET(sge, 0); 1187 sge->length = sess->iv.length; 1188 1189 sge++; 1190 1191 /* i/p 1st seg */ 1192 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1193 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1194 sge->length = mbuf->data_len - data_offset; 1195 1196 mbuf = mbuf->next; 1197 /* i/p segs */ 1198 while (mbuf) { 1199 sge++; 1200 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1201 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1202 sge->length = mbuf->data_len; 1203 mbuf = mbuf->next; 1204 } 1205 DPAA2_SET_FLE_FIN(sge); 1206 DPAA2_SET_FLE_FIN(ip_fle); 1207 1208 /* sg fd */ 1209 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1210 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1211 DPAA2_SET_FD_COMPOUND_FMT(fd); 1212 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1213 1214 DPAA2_SEC_DP_DEBUG( 1215 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1216 " off =%d, len =%d\n", 1217 DPAA2_GET_FD_ADDR(fd), 1218 DPAA2_GET_FD_BPID(fd), 1219 rte_dpaa2_bpid_info[bpid].meta_data_size, 1220 DPAA2_GET_FD_OFFSET(fd), 1221 DPAA2_GET_FD_LEN(fd)); 1222 return 0; 1223 } 1224 1225 static int 1226 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1227 struct qbman_fd *fd, uint16_t bpid) 1228 { 1229 struct rte_crypto_sym_op *sym_op = op->sym; 1230 struct qbman_fle *fle, *sge; 1231 int retval, data_len, data_offset; 1232 struct sec_flow_context *flc; 1233 struct ctxt_priv *priv = sess->ctxt; 1234 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1235 sess->iv.offset); 1236 struct rte_mbuf *dst; 1237 1238 data_len = sym_op->cipher.data.length; 1239 data_offset = sym_op->cipher.data.offset; 1240 1241 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1242 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1243 if ((data_len & 7) || (data_offset & 7)) { 1244 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1245 return -ENOTSUP; 1246 } 1247 1248 data_len = data_len >> 3; 1249 data_offset = data_offset >> 3; 1250 } 1251 1252 if (sym_op->m_dst) 1253 dst = sym_op->m_dst; 1254 else 1255 dst = sym_op->m_src; 1256 1257 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1258 if (retval) { 1259 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1260 return -ENOMEM; 1261 } 1262 memset(fle, 0, FLE_POOL_BUF_SIZE); 1263 /* TODO we are using the first FLE entry to store Mbuf. 1264 * Currently we donot know which FLE has the mbuf stored. 1265 * So while retreiving we can go back 1 FLE from the FD -ADDR 1266 * to get the MBUF Addr from the previous FLE. 1267 * We can have a better approach to use the inline Mbuf 1268 */ 1269 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1270 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1271 fle = fle + 1; 1272 sge = fle + 2; 1273 1274 if (likely(bpid < MAX_BPID)) { 1275 DPAA2_SET_FD_BPID(fd, bpid); 1276 DPAA2_SET_FLE_BPID(fle, bpid); 1277 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1278 DPAA2_SET_FLE_BPID(sge, bpid); 1279 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1280 } else { 1281 DPAA2_SET_FD_IVP(fd); 1282 DPAA2_SET_FLE_IVP(fle); 1283 DPAA2_SET_FLE_IVP((fle + 1)); 1284 DPAA2_SET_FLE_IVP(sge); 1285 DPAA2_SET_FLE_IVP((sge + 1)); 1286 } 1287 1288 flc = &priv->flc_desc[0].flc; 1289 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1290 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1291 DPAA2_SET_FD_COMPOUND_FMT(fd); 1292 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1293 1294 DPAA2_SEC_DP_DEBUG( 1295 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1296 " data_off: 0x%x\n", 1297 data_offset, 1298 data_len, 1299 sess->iv.length, 1300 sym_op->m_src->data_off); 1301 1302 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1303 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off); 1304 1305 fle->length = data_len + sess->iv.length; 1306 1307 DPAA2_SEC_DP_DEBUG( 1308 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1309 flc, fle, fle->addr_hi, fle->addr_lo, 1310 fle->length); 1311 1312 fle++; 1313 1314 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1315 fle->length = data_len + sess->iv.length; 1316 1317 DPAA2_SET_FLE_SG_EXT(fle); 1318 1319 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1320 sge->length = sess->iv.length; 1321 1322 sge++; 1323 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1324 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1325 1326 sge->length = data_len; 1327 DPAA2_SET_FLE_FIN(sge); 1328 DPAA2_SET_FLE_FIN(fle); 1329 1330 DPAA2_SEC_DP_DEBUG( 1331 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1332 " off =%d, len =%d\n", 1333 DPAA2_GET_FD_ADDR(fd), 1334 DPAA2_GET_FD_BPID(fd), 1335 rte_dpaa2_bpid_info[bpid].meta_data_size, 1336 DPAA2_GET_FD_OFFSET(fd), 1337 DPAA2_GET_FD_LEN(fd)); 1338 1339 return 0; 1340 } 1341 1342 static inline int 1343 build_sec_fd(struct rte_crypto_op *op, 1344 struct qbman_fd *fd, uint16_t bpid) 1345 { 1346 int ret = -1; 1347 dpaa2_sec_session *sess; 1348 1349 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1350 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1351 op->sym->session, cryptodev_driver_id); 1352 #ifdef RTE_LIB_SECURITY 1353 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1354 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1355 op->sym->sec_session); 1356 #endif 1357 else 1358 return -ENOTSUP; 1359 1360 if (!sess) 1361 return -EINVAL; 1362 1363 /* Any of the buffer is segmented*/ 1364 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1365 ((op->sym->m_dst != NULL) && 1366 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1367 switch (sess->ctxt_type) { 1368 case DPAA2_SEC_CIPHER: 1369 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1370 break; 1371 case DPAA2_SEC_AUTH: 1372 ret = build_auth_sg_fd(sess, op, fd, bpid); 1373 break; 1374 case DPAA2_SEC_AEAD: 1375 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1376 break; 1377 case DPAA2_SEC_CIPHER_HASH: 1378 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1379 break; 1380 #ifdef RTE_LIB_SECURITY 1381 case DPAA2_SEC_IPSEC: 1382 case DPAA2_SEC_PDCP: 1383 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1384 break; 1385 #endif 1386 case DPAA2_SEC_HASH_CIPHER: 1387 default: 1388 DPAA2_SEC_ERR("error: Unsupported session"); 1389 } 1390 } else { 1391 switch (sess->ctxt_type) { 1392 case DPAA2_SEC_CIPHER: 1393 ret = build_cipher_fd(sess, op, fd, bpid); 1394 break; 1395 case DPAA2_SEC_AUTH: 1396 ret = build_auth_fd(sess, op, fd, bpid); 1397 break; 1398 case DPAA2_SEC_AEAD: 1399 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1400 break; 1401 case DPAA2_SEC_CIPHER_HASH: 1402 ret = build_authenc_fd(sess, op, fd, bpid); 1403 break; 1404 #ifdef RTE_LIB_SECURITY 1405 case DPAA2_SEC_IPSEC: 1406 ret = build_proto_fd(sess, op, fd, bpid); 1407 break; 1408 case DPAA2_SEC_PDCP: 1409 ret = build_proto_compound_fd(sess, op, fd, bpid); 1410 break; 1411 #endif 1412 case DPAA2_SEC_HASH_CIPHER: 1413 default: 1414 DPAA2_SEC_ERR("error: Unsupported session"); 1415 ret = -ENOTSUP; 1416 } 1417 } 1418 return ret; 1419 } 1420 1421 static uint16_t 1422 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1423 uint16_t nb_ops) 1424 { 1425 /* Function to transmit the frames to given device and VQ*/ 1426 uint32_t loop; 1427 int32_t ret; 1428 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1429 uint32_t frames_to_send, retry_count; 1430 struct qbman_eq_desc eqdesc; 1431 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1432 struct qbman_swp *swp; 1433 uint16_t num_tx = 0; 1434 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1435 /*todo - need to support multiple buffer pools */ 1436 uint16_t bpid; 1437 struct rte_mempool *mb_pool; 1438 1439 if (unlikely(nb_ops == 0)) 1440 return 0; 1441 1442 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1443 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1444 return 0; 1445 } 1446 /*Prepare enqueue descriptor*/ 1447 qbman_eq_desc_clear(&eqdesc); 1448 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1449 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1450 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1451 1452 if (!DPAA2_PER_LCORE_DPIO) { 1453 ret = dpaa2_affine_qbman_swp(); 1454 if (ret) { 1455 DPAA2_SEC_ERR( 1456 "Failed to allocate IO portal, tid: %d\n", 1457 rte_gettid()); 1458 return 0; 1459 } 1460 } 1461 swp = DPAA2_PER_LCORE_PORTAL; 1462 1463 while (nb_ops) { 1464 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1465 dpaa2_eqcr_size : nb_ops; 1466 1467 for (loop = 0; loop < frames_to_send; loop++) { 1468 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1469 uint8_t dqrr_index = 1470 *dpaa2_seqn((*ops)->sym->m_src) - 1; 1471 1472 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1473 DPAA2_PER_LCORE_DQRR_SIZE--; 1474 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1475 *dpaa2_seqn((*ops)->sym->m_src) = 1476 DPAA2_INVALID_MBUF_SEQN; 1477 } 1478 1479 /*Clear the unused FD fields before sending*/ 1480 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1481 mb_pool = (*ops)->sym->m_src->pool; 1482 bpid = mempool_to_bpid(mb_pool); 1483 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1484 if (ret) { 1485 DPAA2_SEC_ERR("error: Improper packet contents" 1486 " for crypto operation"); 1487 goto skip_tx; 1488 } 1489 ops++; 1490 } 1491 1492 loop = 0; 1493 retry_count = 0; 1494 while (loop < frames_to_send) { 1495 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1496 &fd_arr[loop], 1497 &flags[loop], 1498 frames_to_send - loop); 1499 if (unlikely(ret < 0)) { 1500 retry_count++; 1501 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1502 num_tx += loop; 1503 nb_ops -= loop; 1504 goto skip_tx; 1505 } 1506 } else { 1507 loop += ret; 1508 retry_count = 0; 1509 } 1510 } 1511 1512 num_tx += loop; 1513 nb_ops -= loop; 1514 } 1515 skip_tx: 1516 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1517 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1518 return num_tx; 1519 } 1520 1521 #ifdef RTE_LIB_SECURITY 1522 static inline struct rte_crypto_op * 1523 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1524 { 1525 struct rte_crypto_op *op; 1526 uint16_t len = DPAA2_GET_FD_LEN(fd); 1527 int16_t diff = 0; 1528 dpaa2_sec_session *sess_priv __rte_unused; 1529 1530 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1531 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1532 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1533 1534 diff = len - mbuf->pkt_len; 1535 mbuf->pkt_len += diff; 1536 mbuf->data_len += diff; 1537 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1538 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1539 op->sym->aead.digest.phys_addr = 0L; 1540 1541 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1542 op->sym->sec_session); 1543 if (sess_priv->dir == DIR_ENC) 1544 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1545 else 1546 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1547 1548 return op; 1549 } 1550 #endif 1551 1552 static inline struct rte_crypto_op * 1553 sec_fd_to_mbuf(const struct qbman_fd *fd) 1554 { 1555 struct qbman_fle *fle; 1556 struct rte_crypto_op *op; 1557 struct ctxt_priv *priv; 1558 struct rte_mbuf *dst, *src; 1559 1560 #ifdef RTE_LIB_SECURITY 1561 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1562 return sec_simple_fd_to_mbuf(fd); 1563 #endif 1564 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1565 1566 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1567 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1568 1569 /* we are using the first FLE entry to store Mbuf. 1570 * Currently we donot know which FLE has the mbuf stored. 1571 * So while retreiving we can go back 1 FLE from the FD -ADDR 1572 * to get the MBUF Addr from the previous FLE. 1573 * We can have a better approach to use the inline Mbuf 1574 */ 1575 1576 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1577 /* TODO complete it. */ 1578 DPAA2_SEC_ERR("error: non inline buffer"); 1579 return NULL; 1580 } 1581 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1582 1583 /* Prefeth op */ 1584 src = op->sym->m_src; 1585 rte_prefetch0(src); 1586 1587 if (op->sym->m_dst) { 1588 dst = op->sym->m_dst; 1589 rte_prefetch0(dst); 1590 } else 1591 dst = src; 1592 1593 #ifdef RTE_LIB_SECURITY 1594 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1595 uint16_t len = DPAA2_GET_FD_LEN(fd); 1596 dst->pkt_len = len; 1597 while (dst->next != NULL) { 1598 len -= dst->data_len; 1599 dst = dst->next; 1600 } 1601 dst->data_len = len; 1602 } 1603 #endif 1604 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1605 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1606 (void *)dst, 1607 dst->buf_addr, 1608 DPAA2_GET_FD_ADDR(fd), 1609 DPAA2_GET_FD_BPID(fd), 1610 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1611 DPAA2_GET_FD_OFFSET(fd), 1612 DPAA2_GET_FD_LEN(fd)); 1613 1614 /* free the fle memory */ 1615 if (likely(rte_pktmbuf_is_contiguous(src))) { 1616 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1617 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1618 } else 1619 rte_free((void *)(fle-1)); 1620 1621 return op; 1622 } 1623 1624 static uint16_t 1625 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1626 uint16_t nb_ops) 1627 { 1628 /* Function is responsible to receive frames for a given device and VQ*/ 1629 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1630 struct qbman_result *dq_storage; 1631 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1632 int ret, num_rx = 0; 1633 uint8_t is_last = 0, status; 1634 struct qbman_swp *swp; 1635 const struct qbman_fd *fd; 1636 struct qbman_pull_desc pulldesc; 1637 1638 if (!DPAA2_PER_LCORE_DPIO) { 1639 ret = dpaa2_affine_qbman_swp(); 1640 if (ret) { 1641 DPAA2_SEC_ERR( 1642 "Failed to allocate IO portal, tid: %d\n", 1643 rte_gettid()); 1644 return 0; 1645 } 1646 } 1647 swp = DPAA2_PER_LCORE_PORTAL; 1648 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1649 1650 qbman_pull_desc_clear(&pulldesc); 1651 qbman_pull_desc_set_numframes(&pulldesc, 1652 (nb_ops > dpaa2_dqrr_size) ? 1653 dpaa2_dqrr_size : nb_ops); 1654 qbman_pull_desc_set_fq(&pulldesc, fqid); 1655 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1656 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1657 1); 1658 1659 /*Issue a volatile dequeue command. */ 1660 while (1) { 1661 if (qbman_swp_pull(swp, &pulldesc)) { 1662 DPAA2_SEC_WARN( 1663 "SEC VDQ command is not issued : QBMAN busy"); 1664 /* Portal was busy, try again */ 1665 continue; 1666 } 1667 break; 1668 }; 1669 1670 /* Receive the packets till Last Dequeue entry is found with 1671 * respect to the above issues PULL command. 1672 */ 1673 while (!is_last) { 1674 /* Check if the previous issued command is completed. 1675 * Also seems like the SWP is shared between the Ethernet Driver 1676 * and the SEC driver. 1677 */ 1678 while (!qbman_check_command_complete(dq_storage)) 1679 ; 1680 1681 /* Loop until the dq_storage is updated with 1682 * new token by QBMAN 1683 */ 1684 while (!qbman_check_new_result(dq_storage)) 1685 ; 1686 /* Check whether Last Pull command is Expired and 1687 * setting Condition for Loop termination 1688 */ 1689 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1690 is_last = 1; 1691 /* Check for valid frame. */ 1692 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1693 if (unlikely( 1694 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1695 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1696 continue; 1697 } 1698 } 1699 1700 fd = qbman_result_DQ_fd(dq_storage); 1701 ops[num_rx] = sec_fd_to_mbuf(fd); 1702 1703 if (unlikely(fd->simple.frc)) { 1704 /* TODO Parse SEC errors */ 1705 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n", 1706 fd->simple.frc); 1707 dpaa2_qp->rx_vq.err_pkts += 1; 1708 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1709 } else { 1710 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1711 } 1712 1713 num_rx++; 1714 dq_storage++; 1715 } /* End of Packet Rx loop */ 1716 1717 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1718 1719 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx, 1720 dpaa2_qp->rx_vq.err_pkts); 1721 /*Return the total number of packets received to DPAA2 app*/ 1722 return num_rx; 1723 } 1724 1725 /** Release queue pair */ 1726 static int 1727 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1728 { 1729 struct dpaa2_sec_qp *qp = 1730 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1731 1732 PMD_INIT_FUNC_TRACE(); 1733 1734 if (qp->rx_vq.q_storage) { 1735 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1736 rte_free(qp->rx_vq.q_storage); 1737 } 1738 rte_free(qp); 1739 1740 dev->data->queue_pairs[queue_pair_id] = NULL; 1741 1742 return 0; 1743 } 1744 1745 /** Setup a queue pair */ 1746 static int 1747 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1748 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1749 __rte_unused int socket_id) 1750 { 1751 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1752 struct dpaa2_sec_qp *qp; 1753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1754 struct dpseci_rx_queue_cfg cfg; 1755 int32_t retcode; 1756 1757 PMD_INIT_FUNC_TRACE(); 1758 1759 /* If qp is already in use free ring memory and qp metadata. */ 1760 if (dev->data->queue_pairs[qp_id] != NULL) { 1761 DPAA2_SEC_INFO("QP already setup"); 1762 return 0; 1763 } 1764 1765 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1766 dev, qp_id, qp_conf); 1767 1768 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1769 1770 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1771 RTE_CACHE_LINE_SIZE); 1772 if (!qp) { 1773 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1774 return -ENOMEM; 1775 } 1776 1777 qp->rx_vq.crypto_data = dev->data; 1778 qp->tx_vq.crypto_data = dev->data; 1779 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1780 sizeof(struct queue_storage_info_t), 1781 RTE_CACHE_LINE_SIZE); 1782 if (!qp->rx_vq.q_storage) { 1783 DPAA2_SEC_ERR("malloc failed for q_storage"); 1784 return -ENOMEM; 1785 } 1786 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1787 1788 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1789 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1790 return -ENOMEM; 1791 } 1792 1793 dev->data->queue_pairs[qp_id] = qp; 1794 1795 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1796 cfg.user_ctx = (size_t)(&qp->rx_vq); 1797 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1798 qp_id, &cfg); 1799 return retcode; 1800 } 1801 1802 /** Returns the size of the aesni gcm session structure */ 1803 static unsigned int 1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1805 { 1806 PMD_INIT_FUNC_TRACE(); 1807 1808 return sizeof(dpaa2_sec_session); 1809 } 1810 1811 static int 1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1813 struct rte_crypto_sym_xform *xform, 1814 dpaa2_sec_session *session) 1815 { 1816 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1817 struct alginfo cipherdata; 1818 int bufsize, ret = 0; 1819 struct ctxt_priv *priv; 1820 struct sec_flow_context *flc; 1821 1822 PMD_INIT_FUNC_TRACE(); 1823 1824 /* For SEC CIPHER only one descriptor is required. */ 1825 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1826 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1827 RTE_CACHE_LINE_SIZE); 1828 if (priv == NULL) { 1829 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1830 return -ENOMEM; 1831 } 1832 1833 priv->fle_pool = dev_priv->fle_pool; 1834 1835 flc = &priv->flc_desc[0].flc; 1836 1837 session->ctxt_type = DPAA2_SEC_CIPHER; 1838 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1839 RTE_CACHE_LINE_SIZE); 1840 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 1841 DPAA2_SEC_ERR("No Memory for cipher key"); 1842 rte_free(priv); 1843 return -ENOMEM; 1844 } 1845 session->cipher_key.length = xform->cipher.key.length; 1846 1847 memcpy(session->cipher_key.data, xform->cipher.key.data, 1848 xform->cipher.key.length); 1849 cipherdata.key = (size_t)session->cipher_key.data; 1850 cipherdata.keylen = session->cipher_key.length; 1851 cipherdata.key_enc_flags = 0; 1852 cipherdata.key_type = RTA_DATA_IMM; 1853 1854 /* Set IV parameters */ 1855 session->iv.offset = xform->cipher.iv.offset; 1856 session->iv.length = xform->cipher.iv.length; 1857 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1858 DIR_ENC : DIR_DEC; 1859 1860 switch (xform->cipher.algo) { 1861 case RTE_CRYPTO_CIPHER_AES_CBC: 1862 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1863 cipherdata.algmode = OP_ALG_AAI_CBC; 1864 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1865 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1866 SHR_NEVER, &cipherdata, 1867 session->iv.length, 1868 session->dir); 1869 break; 1870 case RTE_CRYPTO_CIPHER_3DES_CBC: 1871 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1872 cipherdata.algmode = OP_ALG_AAI_CBC; 1873 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1874 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1875 SHR_NEVER, &cipherdata, 1876 session->iv.length, 1877 session->dir); 1878 break; 1879 case RTE_CRYPTO_CIPHER_DES_CBC: 1880 cipherdata.algtype = OP_ALG_ALGSEL_DES; 1881 cipherdata.algmode = OP_ALG_AAI_CBC; 1882 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 1883 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1884 SHR_NEVER, &cipherdata, 1885 session->iv.length, 1886 session->dir); 1887 break; 1888 case RTE_CRYPTO_CIPHER_AES_CTR: 1889 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1890 cipherdata.algmode = OP_ALG_AAI_CTR; 1891 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1892 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1893 SHR_NEVER, &cipherdata, 1894 session->iv.length, 1895 session->dir); 1896 break; 1897 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1898 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1899 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1900 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1901 &cipherdata, 1902 session->dir); 1903 break; 1904 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1905 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1906 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1907 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1908 &cipherdata, 1909 session->dir); 1910 break; 1911 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1912 case RTE_CRYPTO_CIPHER_AES_F8: 1913 case RTE_CRYPTO_CIPHER_AES_ECB: 1914 case RTE_CRYPTO_CIPHER_3DES_ECB: 1915 case RTE_CRYPTO_CIPHER_3DES_CTR: 1916 case RTE_CRYPTO_CIPHER_AES_XTS: 1917 case RTE_CRYPTO_CIPHER_ARC4: 1918 case RTE_CRYPTO_CIPHER_NULL: 1919 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1920 xform->cipher.algo); 1921 ret = -ENOTSUP; 1922 goto error_out; 1923 default: 1924 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1925 xform->cipher.algo); 1926 ret = -ENOTSUP; 1927 goto error_out; 1928 } 1929 1930 if (bufsize < 0) { 1931 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1932 ret = -EINVAL; 1933 goto error_out; 1934 } 1935 1936 flc->word1_sdl = (uint8_t)bufsize; 1937 session->ctxt = priv; 1938 1939 #ifdef CAAM_DESC_DEBUG 1940 int i; 1941 for (i = 0; i < bufsize; i++) 1942 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1943 #endif 1944 return ret; 1945 1946 error_out: 1947 rte_free(session->cipher_key.data); 1948 rte_free(priv); 1949 return ret; 1950 } 1951 1952 static int 1953 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1954 struct rte_crypto_sym_xform *xform, 1955 dpaa2_sec_session *session) 1956 { 1957 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1958 struct alginfo authdata; 1959 int bufsize, ret = 0; 1960 struct ctxt_priv *priv; 1961 struct sec_flow_context *flc; 1962 1963 PMD_INIT_FUNC_TRACE(); 1964 1965 /* For SEC AUTH three descriptors are required for various stages */ 1966 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1967 sizeof(struct ctxt_priv) + 3 * 1968 sizeof(struct sec_flc_desc), 1969 RTE_CACHE_LINE_SIZE); 1970 if (priv == NULL) { 1971 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1972 return -ENOMEM; 1973 } 1974 1975 priv->fle_pool = dev_priv->fle_pool; 1976 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1977 1978 session->ctxt_type = DPAA2_SEC_AUTH; 1979 session->auth_key.length = xform->auth.key.length; 1980 if (xform->auth.key.length) { 1981 session->auth_key.data = rte_zmalloc(NULL, 1982 xform->auth.key.length, 1983 RTE_CACHE_LINE_SIZE); 1984 if (session->auth_key.data == NULL) { 1985 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1986 rte_free(priv); 1987 return -ENOMEM; 1988 } 1989 memcpy(session->auth_key.data, xform->auth.key.data, 1990 xform->auth.key.length); 1991 authdata.key = (size_t)session->auth_key.data; 1992 authdata.key_enc_flags = 0; 1993 authdata.key_type = RTA_DATA_IMM; 1994 } 1995 authdata.keylen = session->auth_key.length; 1996 1997 session->digest_length = xform->auth.digest_length; 1998 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1999 DIR_ENC : DIR_DEC; 2000 2001 switch (xform->auth.algo) { 2002 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2003 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2004 authdata.algmode = OP_ALG_AAI_HMAC; 2005 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2006 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2007 1, 0, SHR_NEVER, &authdata, 2008 !session->dir, 2009 session->digest_length); 2010 break; 2011 case RTE_CRYPTO_AUTH_MD5_HMAC: 2012 authdata.algtype = OP_ALG_ALGSEL_MD5; 2013 authdata.algmode = OP_ALG_AAI_HMAC; 2014 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2015 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2016 1, 0, SHR_NEVER, &authdata, 2017 !session->dir, 2018 session->digest_length); 2019 break; 2020 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2021 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2022 authdata.algmode = OP_ALG_AAI_HMAC; 2023 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2024 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2025 1, 0, SHR_NEVER, &authdata, 2026 !session->dir, 2027 session->digest_length); 2028 break; 2029 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2030 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2031 authdata.algmode = OP_ALG_AAI_HMAC; 2032 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2033 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2034 1, 0, SHR_NEVER, &authdata, 2035 !session->dir, 2036 session->digest_length); 2037 break; 2038 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2039 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2040 authdata.algmode = OP_ALG_AAI_HMAC; 2041 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2042 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2043 1, 0, SHR_NEVER, &authdata, 2044 !session->dir, 2045 session->digest_length); 2046 break; 2047 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2048 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2049 authdata.algmode = OP_ALG_AAI_HMAC; 2050 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2051 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2052 1, 0, SHR_NEVER, &authdata, 2053 !session->dir, 2054 session->digest_length); 2055 break; 2056 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2057 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2058 authdata.algmode = OP_ALG_AAI_F9; 2059 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2060 session->iv.offset = xform->auth.iv.offset; 2061 session->iv.length = xform->auth.iv.length; 2062 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2063 1, 0, &authdata, 2064 !session->dir, 2065 session->digest_length); 2066 break; 2067 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2068 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2069 authdata.algmode = OP_ALG_AAI_F9; 2070 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2071 session->iv.offset = xform->auth.iv.offset; 2072 session->iv.length = xform->auth.iv.length; 2073 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2074 1, 0, &authdata, 2075 !session->dir, 2076 session->digest_length); 2077 break; 2078 case RTE_CRYPTO_AUTH_SHA1: 2079 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2080 authdata.algmode = OP_ALG_AAI_HASH; 2081 session->auth_alg = RTE_CRYPTO_AUTH_SHA1; 2082 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2083 1, 0, SHR_NEVER, &authdata, 2084 !session->dir, 2085 session->digest_length); 2086 break; 2087 case RTE_CRYPTO_AUTH_MD5: 2088 authdata.algtype = OP_ALG_ALGSEL_MD5; 2089 authdata.algmode = OP_ALG_AAI_HASH; 2090 session->auth_alg = RTE_CRYPTO_AUTH_MD5; 2091 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2092 1, 0, SHR_NEVER, &authdata, 2093 !session->dir, 2094 session->digest_length); 2095 break; 2096 case RTE_CRYPTO_AUTH_SHA256: 2097 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2098 authdata.algmode = OP_ALG_AAI_HASH; 2099 session->auth_alg = RTE_CRYPTO_AUTH_SHA256; 2100 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2101 1, 0, SHR_NEVER, &authdata, 2102 !session->dir, 2103 session->digest_length); 2104 break; 2105 case RTE_CRYPTO_AUTH_SHA384: 2106 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2107 authdata.algmode = OP_ALG_AAI_HASH; 2108 session->auth_alg = RTE_CRYPTO_AUTH_SHA384; 2109 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2110 1, 0, SHR_NEVER, &authdata, 2111 !session->dir, 2112 session->digest_length); 2113 break; 2114 case RTE_CRYPTO_AUTH_SHA512: 2115 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2116 authdata.algmode = OP_ALG_AAI_HASH; 2117 session->auth_alg = RTE_CRYPTO_AUTH_SHA512; 2118 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2119 1, 0, SHR_NEVER, &authdata, 2120 !session->dir, 2121 session->digest_length); 2122 break; 2123 case RTE_CRYPTO_AUTH_SHA224: 2124 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2125 authdata.algmode = OP_ALG_AAI_HASH; 2126 session->auth_alg = RTE_CRYPTO_AUTH_SHA224; 2127 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2128 1, 0, SHR_NEVER, &authdata, 2129 !session->dir, 2130 session->digest_length); 2131 break; 2132 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2133 authdata.algtype = OP_ALG_ALGSEL_AES; 2134 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2135 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2136 bufsize = cnstr_shdsc_aes_mac( 2137 priv->flc_desc[DESC_INITFINAL].desc, 2138 1, 0, SHR_NEVER, &authdata, 2139 !session->dir, 2140 session->digest_length); 2141 break; 2142 case RTE_CRYPTO_AUTH_AES_CMAC: 2143 authdata.algtype = OP_ALG_ALGSEL_AES; 2144 authdata.algmode = OP_ALG_AAI_CMAC; 2145 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2146 bufsize = cnstr_shdsc_aes_mac( 2147 priv->flc_desc[DESC_INITFINAL].desc, 2148 1, 0, SHR_NEVER, &authdata, 2149 !session->dir, 2150 session->digest_length); 2151 break; 2152 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2153 case RTE_CRYPTO_AUTH_AES_GMAC: 2154 case RTE_CRYPTO_AUTH_KASUMI_F9: 2155 case RTE_CRYPTO_AUTH_NULL: 2156 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 2157 xform->auth.algo); 2158 ret = -ENOTSUP; 2159 goto error_out; 2160 default: 2161 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2162 xform->auth.algo); 2163 ret = -ENOTSUP; 2164 goto error_out; 2165 } 2166 2167 if (bufsize < 0) { 2168 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2169 ret = -EINVAL; 2170 goto error_out; 2171 } 2172 2173 flc->word1_sdl = (uint8_t)bufsize; 2174 session->ctxt = priv; 2175 #ifdef CAAM_DESC_DEBUG 2176 int i; 2177 for (i = 0; i < bufsize; i++) 2178 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2179 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2180 #endif 2181 2182 return ret; 2183 2184 error_out: 2185 rte_free(session->auth_key.data); 2186 rte_free(priv); 2187 return ret; 2188 } 2189 2190 static int 2191 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 2192 struct rte_crypto_sym_xform *xform, 2193 dpaa2_sec_session *session) 2194 { 2195 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2196 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2197 struct alginfo aeaddata; 2198 int bufsize; 2199 struct ctxt_priv *priv; 2200 struct sec_flow_context *flc; 2201 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2202 int err, ret = 0; 2203 2204 PMD_INIT_FUNC_TRACE(); 2205 2206 /* Set IV parameters */ 2207 session->iv.offset = aead_xform->iv.offset; 2208 session->iv.length = aead_xform->iv.length; 2209 session->ctxt_type = DPAA2_SEC_AEAD; 2210 2211 /* For SEC AEAD only one descriptor is required */ 2212 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2213 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2214 RTE_CACHE_LINE_SIZE); 2215 if (priv == NULL) { 2216 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2217 return -ENOMEM; 2218 } 2219 2220 priv->fle_pool = dev_priv->fle_pool; 2221 flc = &priv->flc_desc[0].flc; 2222 2223 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2224 RTE_CACHE_LINE_SIZE); 2225 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2226 DPAA2_SEC_ERR("No Memory for aead key"); 2227 rte_free(priv); 2228 return -ENOMEM; 2229 } 2230 memcpy(session->aead_key.data, aead_xform->key.data, 2231 aead_xform->key.length); 2232 2233 session->digest_length = aead_xform->digest_length; 2234 session->aead_key.length = aead_xform->key.length; 2235 ctxt->auth_only_len = aead_xform->aad_length; 2236 2237 aeaddata.key = (size_t)session->aead_key.data; 2238 aeaddata.keylen = session->aead_key.length; 2239 aeaddata.key_enc_flags = 0; 2240 aeaddata.key_type = RTA_DATA_IMM; 2241 2242 switch (aead_xform->algo) { 2243 case RTE_CRYPTO_AEAD_AES_GCM: 2244 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2245 aeaddata.algmode = OP_ALG_AAI_GCM; 2246 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2247 break; 2248 case RTE_CRYPTO_AEAD_AES_CCM: 2249 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 2250 aead_xform->algo); 2251 ret = -ENOTSUP; 2252 goto error_out; 2253 default: 2254 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2255 aead_xform->algo); 2256 ret = -ENOTSUP; 2257 goto error_out; 2258 } 2259 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2260 DIR_ENC : DIR_DEC; 2261 2262 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2263 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2264 DESC_JOB_IO_LEN, 2265 (unsigned int *)priv->flc_desc[0].desc, 2266 &priv->flc_desc[0].desc[1], 1); 2267 2268 if (err < 0) { 2269 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2270 ret = -EINVAL; 2271 goto error_out; 2272 } 2273 if (priv->flc_desc[0].desc[1] & 1) { 2274 aeaddata.key_type = RTA_DATA_IMM; 2275 } else { 2276 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2277 aeaddata.key_type = RTA_DATA_PTR; 2278 } 2279 priv->flc_desc[0].desc[0] = 0; 2280 priv->flc_desc[0].desc[1] = 0; 2281 2282 if (session->dir == DIR_ENC) 2283 bufsize = cnstr_shdsc_gcm_encap( 2284 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2285 &aeaddata, session->iv.length, 2286 session->digest_length); 2287 else 2288 bufsize = cnstr_shdsc_gcm_decap( 2289 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2290 &aeaddata, session->iv.length, 2291 session->digest_length); 2292 if (bufsize < 0) { 2293 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2294 ret = -EINVAL; 2295 goto error_out; 2296 } 2297 2298 flc->word1_sdl = (uint8_t)bufsize; 2299 session->ctxt = priv; 2300 #ifdef CAAM_DESC_DEBUG 2301 int i; 2302 for (i = 0; i < bufsize; i++) 2303 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2304 i, priv->flc_desc[0].desc[i]); 2305 #endif 2306 return ret; 2307 2308 error_out: 2309 rte_free(session->aead_key.data); 2310 rte_free(priv); 2311 return ret; 2312 } 2313 2314 2315 static int 2316 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2317 struct rte_crypto_sym_xform *xform, 2318 dpaa2_sec_session *session) 2319 { 2320 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2321 struct alginfo authdata, cipherdata; 2322 int bufsize; 2323 struct ctxt_priv *priv; 2324 struct sec_flow_context *flc; 2325 struct rte_crypto_cipher_xform *cipher_xform; 2326 struct rte_crypto_auth_xform *auth_xform; 2327 int err, ret = 0; 2328 2329 PMD_INIT_FUNC_TRACE(); 2330 2331 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2332 cipher_xform = &xform->cipher; 2333 auth_xform = &xform->next->auth; 2334 session->ctxt_type = 2335 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2336 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2337 } else { 2338 cipher_xform = &xform->next->cipher; 2339 auth_xform = &xform->auth; 2340 session->ctxt_type = 2341 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2342 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2343 } 2344 2345 /* Set IV parameters */ 2346 session->iv.offset = cipher_xform->iv.offset; 2347 session->iv.length = cipher_xform->iv.length; 2348 2349 /* For SEC AEAD only one descriptor is required */ 2350 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2351 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2352 RTE_CACHE_LINE_SIZE); 2353 if (priv == NULL) { 2354 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2355 return -ENOMEM; 2356 } 2357 2358 priv->fle_pool = dev_priv->fle_pool; 2359 flc = &priv->flc_desc[0].flc; 2360 2361 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2362 RTE_CACHE_LINE_SIZE); 2363 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2364 DPAA2_SEC_ERR("No Memory for cipher key"); 2365 rte_free(priv); 2366 return -ENOMEM; 2367 } 2368 session->cipher_key.length = cipher_xform->key.length; 2369 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2370 RTE_CACHE_LINE_SIZE); 2371 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2372 DPAA2_SEC_ERR("No Memory for auth key"); 2373 rte_free(session->cipher_key.data); 2374 rte_free(priv); 2375 return -ENOMEM; 2376 } 2377 session->auth_key.length = auth_xform->key.length; 2378 memcpy(session->cipher_key.data, cipher_xform->key.data, 2379 cipher_xform->key.length); 2380 memcpy(session->auth_key.data, auth_xform->key.data, 2381 auth_xform->key.length); 2382 2383 authdata.key = (size_t)session->auth_key.data; 2384 authdata.keylen = session->auth_key.length; 2385 authdata.key_enc_flags = 0; 2386 authdata.key_type = RTA_DATA_IMM; 2387 2388 session->digest_length = auth_xform->digest_length; 2389 2390 switch (auth_xform->algo) { 2391 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2392 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2393 authdata.algmode = OP_ALG_AAI_HMAC; 2394 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2395 break; 2396 case RTE_CRYPTO_AUTH_MD5_HMAC: 2397 authdata.algtype = OP_ALG_ALGSEL_MD5; 2398 authdata.algmode = OP_ALG_AAI_HMAC; 2399 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2400 break; 2401 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2402 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2403 authdata.algmode = OP_ALG_AAI_HMAC; 2404 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2405 break; 2406 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2407 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2408 authdata.algmode = OP_ALG_AAI_HMAC; 2409 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2410 break; 2411 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2412 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2413 authdata.algmode = OP_ALG_AAI_HMAC; 2414 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2415 break; 2416 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2417 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2418 authdata.algmode = OP_ALG_AAI_HMAC; 2419 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2420 break; 2421 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2422 authdata.algtype = OP_ALG_ALGSEL_AES; 2423 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2424 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2425 break; 2426 case RTE_CRYPTO_AUTH_AES_CMAC: 2427 authdata.algtype = OP_ALG_ALGSEL_AES; 2428 authdata.algmode = OP_ALG_AAI_CMAC; 2429 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2430 break; 2431 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2432 case RTE_CRYPTO_AUTH_AES_GMAC: 2433 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2434 case RTE_CRYPTO_AUTH_NULL: 2435 case RTE_CRYPTO_AUTH_SHA1: 2436 case RTE_CRYPTO_AUTH_SHA256: 2437 case RTE_CRYPTO_AUTH_SHA512: 2438 case RTE_CRYPTO_AUTH_SHA224: 2439 case RTE_CRYPTO_AUTH_SHA384: 2440 case RTE_CRYPTO_AUTH_MD5: 2441 case RTE_CRYPTO_AUTH_KASUMI_F9: 2442 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2443 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2444 auth_xform->algo); 2445 ret = -ENOTSUP; 2446 goto error_out; 2447 default: 2448 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2449 auth_xform->algo); 2450 ret = -ENOTSUP; 2451 goto error_out; 2452 } 2453 cipherdata.key = (size_t)session->cipher_key.data; 2454 cipherdata.keylen = session->cipher_key.length; 2455 cipherdata.key_enc_flags = 0; 2456 cipherdata.key_type = RTA_DATA_IMM; 2457 2458 switch (cipher_xform->algo) { 2459 case RTE_CRYPTO_CIPHER_AES_CBC: 2460 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2461 cipherdata.algmode = OP_ALG_AAI_CBC; 2462 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2463 break; 2464 case RTE_CRYPTO_CIPHER_3DES_CBC: 2465 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2466 cipherdata.algmode = OP_ALG_AAI_CBC; 2467 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2468 break; 2469 case RTE_CRYPTO_CIPHER_DES_CBC: 2470 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2471 cipherdata.algmode = OP_ALG_AAI_CBC; 2472 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2473 break; 2474 case RTE_CRYPTO_CIPHER_AES_CTR: 2475 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2476 cipherdata.algmode = OP_ALG_AAI_CTR; 2477 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2478 break; 2479 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2480 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2481 case RTE_CRYPTO_CIPHER_NULL: 2482 case RTE_CRYPTO_CIPHER_3DES_ECB: 2483 case RTE_CRYPTO_CIPHER_3DES_CTR: 2484 case RTE_CRYPTO_CIPHER_AES_ECB: 2485 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2486 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2487 cipher_xform->algo); 2488 ret = -ENOTSUP; 2489 goto error_out; 2490 default: 2491 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2492 cipher_xform->algo); 2493 ret = -ENOTSUP; 2494 goto error_out; 2495 } 2496 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2497 DIR_ENC : DIR_DEC; 2498 2499 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2500 priv->flc_desc[0].desc[1] = authdata.keylen; 2501 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2502 DESC_JOB_IO_LEN, 2503 (unsigned int *)priv->flc_desc[0].desc, 2504 &priv->flc_desc[0].desc[2], 2); 2505 2506 if (err < 0) { 2507 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2508 ret = -EINVAL; 2509 goto error_out; 2510 } 2511 if (priv->flc_desc[0].desc[2] & 1) { 2512 cipherdata.key_type = RTA_DATA_IMM; 2513 } else { 2514 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2515 cipherdata.key_type = RTA_DATA_PTR; 2516 } 2517 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2518 authdata.key_type = RTA_DATA_IMM; 2519 } else { 2520 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2521 authdata.key_type = RTA_DATA_PTR; 2522 } 2523 priv->flc_desc[0].desc[0] = 0; 2524 priv->flc_desc[0].desc[1] = 0; 2525 priv->flc_desc[0].desc[2] = 0; 2526 2527 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2528 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2529 0, SHR_SERIAL, 2530 &cipherdata, &authdata, 2531 session->iv.length, 2532 session->digest_length, 2533 session->dir); 2534 if (bufsize < 0) { 2535 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2536 ret = -EINVAL; 2537 goto error_out; 2538 } 2539 } else { 2540 DPAA2_SEC_ERR("Hash before cipher not supported"); 2541 ret = -ENOTSUP; 2542 goto error_out; 2543 } 2544 2545 flc->word1_sdl = (uint8_t)bufsize; 2546 session->ctxt = priv; 2547 #ifdef CAAM_DESC_DEBUG 2548 int i; 2549 for (i = 0; i < bufsize; i++) 2550 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2551 i, priv->flc_desc[0].desc[i]); 2552 #endif 2553 2554 return ret; 2555 2556 error_out: 2557 rte_free(session->cipher_key.data); 2558 rte_free(session->auth_key.data); 2559 rte_free(priv); 2560 return ret; 2561 } 2562 2563 static int 2564 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2565 struct rte_crypto_sym_xform *xform, void *sess) 2566 { 2567 dpaa2_sec_session *session = sess; 2568 int ret; 2569 2570 PMD_INIT_FUNC_TRACE(); 2571 2572 if (unlikely(sess == NULL)) { 2573 DPAA2_SEC_ERR("Invalid session struct"); 2574 return -EINVAL; 2575 } 2576 2577 memset(session, 0, sizeof(dpaa2_sec_session)); 2578 /* Default IV length = 0 */ 2579 session->iv.length = 0; 2580 2581 /* Cipher Only */ 2582 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2583 ret = dpaa2_sec_cipher_init(dev, xform, session); 2584 2585 /* Authentication Only */ 2586 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2587 xform->next == NULL) { 2588 ret = dpaa2_sec_auth_init(dev, xform, session); 2589 2590 /* Cipher then Authenticate */ 2591 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2592 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2593 session->ext_params.aead_ctxt.auth_cipher_text = true; 2594 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2595 ret = dpaa2_sec_auth_init(dev, xform, session); 2596 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2597 ret = dpaa2_sec_cipher_init(dev, xform, session); 2598 else 2599 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2600 /* Authenticate then Cipher */ 2601 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2602 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2603 session->ext_params.aead_ctxt.auth_cipher_text = false; 2604 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2605 ret = dpaa2_sec_cipher_init(dev, xform, session); 2606 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2607 ret = dpaa2_sec_auth_init(dev, xform, session); 2608 else 2609 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2610 /* AEAD operation for AES-GCM kind of Algorithms */ 2611 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2612 xform->next == NULL) { 2613 ret = dpaa2_sec_aead_init(dev, xform, session); 2614 2615 } else { 2616 DPAA2_SEC_ERR("Invalid crypto type"); 2617 return -EINVAL; 2618 } 2619 2620 return ret; 2621 } 2622 2623 #ifdef RTE_LIB_SECURITY 2624 static int 2625 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2626 dpaa2_sec_session *session, 2627 struct alginfo *aeaddata) 2628 { 2629 PMD_INIT_FUNC_TRACE(); 2630 2631 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2632 RTE_CACHE_LINE_SIZE); 2633 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2634 DPAA2_SEC_ERR("No Memory for aead key"); 2635 return -ENOMEM; 2636 } 2637 memcpy(session->aead_key.data, aead_xform->key.data, 2638 aead_xform->key.length); 2639 2640 session->digest_length = aead_xform->digest_length; 2641 session->aead_key.length = aead_xform->key.length; 2642 2643 aeaddata->key = (size_t)session->aead_key.data; 2644 aeaddata->keylen = session->aead_key.length; 2645 aeaddata->key_enc_flags = 0; 2646 aeaddata->key_type = RTA_DATA_IMM; 2647 2648 switch (aead_xform->algo) { 2649 case RTE_CRYPTO_AEAD_AES_GCM: 2650 switch (session->digest_length) { 2651 case 8: 2652 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2653 break; 2654 case 12: 2655 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2656 break; 2657 case 16: 2658 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2659 break; 2660 default: 2661 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2662 session->digest_length); 2663 return -EINVAL; 2664 } 2665 aeaddata->algmode = OP_ALG_AAI_GCM; 2666 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2667 break; 2668 case RTE_CRYPTO_AEAD_AES_CCM: 2669 switch (session->digest_length) { 2670 case 8: 2671 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2672 break; 2673 case 12: 2674 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2675 break; 2676 case 16: 2677 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2678 break; 2679 default: 2680 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2681 session->digest_length); 2682 return -EINVAL; 2683 } 2684 aeaddata->algmode = OP_ALG_AAI_CCM; 2685 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2686 break; 2687 default: 2688 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2689 aead_xform->algo); 2690 return -ENOTSUP; 2691 } 2692 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2693 DIR_ENC : DIR_DEC; 2694 2695 return 0; 2696 } 2697 2698 static int 2699 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2700 struct rte_crypto_auth_xform *auth_xform, 2701 dpaa2_sec_session *session, 2702 struct alginfo *cipherdata, 2703 struct alginfo *authdata) 2704 { 2705 if (cipher_xform) { 2706 session->cipher_key.data = rte_zmalloc(NULL, 2707 cipher_xform->key.length, 2708 RTE_CACHE_LINE_SIZE); 2709 if (session->cipher_key.data == NULL && 2710 cipher_xform->key.length > 0) { 2711 DPAA2_SEC_ERR("No Memory for cipher key"); 2712 return -ENOMEM; 2713 } 2714 2715 session->cipher_key.length = cipher_xform->key.length; 2716 memcpy(session->cipher_key.data, cipher_xform->key.data, 2717 cipher_xform->key.length); 2718 session->cipher_alg = cipher_xform->algo; 2719 } else { 2720 session->cipher_key.data = NULL; 2721 session->cipher_key.length = 0; 2722 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2723 } 2724 2725 if (auth_xform) { 2726 session->auth_key.data = rte_zmalloc(NULL, 2727 auth_xform->key.length, 2728 RTE_CACHE_LINE_SIZE); 2729 if (session->auth_key.data == NULL && 2730 auth_xform->key.length > 0) { 2731 DPAA2_SEC_ERR("No Memory for auth key"); 2732 return -ENOMEM; 2733 } 2734 session->auth_key.length = auth_xform->key.length; 2735 memcpy(session->auth_key.data, auth_xform->key.data, 2736 auth_xform->key.length); 2737 session->auth_alg = auth_xform->algo; 2738 session->digest_length = auth_xform->digest_length; 2739 } else { 2740 session->auth_key.data = NULL; 2741 session->auth_key.length = 0; 2742 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2743 } 2744 2745 authdata->key = (size_t)session->auth_key.data; 2746 authdata->keylen = session->auth_key.length; 2747 authdata->key_enc_flags = 0; 2748 authdata->key_type = RTA_DATA_IMM; 2749 switch (session->auth_alg) { 2750 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2751 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2752 authdata->algmode = OP_ALG_AAI_HMAC; 2753 break; 2754 case RTE_CRYPTO_AUTH_MD5_HMAC: 2755 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2756 authdata->algmode = OP_ALG_AAI_HMAC; 2757 break; 2758 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2759 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2760 authdata->algmode = OP_ALG_AAI_HMAC; 2761 if (session->digest_length != 16) 2762 DPAA2_SEC_WARN( 2763 "+++Using sha256-hmac truncated len is non-standard," 2764 "it will not work with lookaside proto"); 2765 break; 2766 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2767 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2768 authdata->algmode = OP_ALG_AAI_HMAC; 2769 break; 2770 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2771 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2772 authdata->algmode = OP_ALG_AAI_HMAC; 2773 break; 2774 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2775 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96; 2776 authdata->algmode = OP_ALG_AAI_XCBC_MAC; 2777 break; 2778 case RTE_CRYPTO_AUTH_AES_CMAC: 2779 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2780 authdata->algmode = OP_ALG_AAI_CMAC; 2781 break; 2782 case RTE_CRYPTO_AUTH_NULL: 2783 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2784 break; 2785 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2786 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2787 case RTE_CRYPTO_AUTH_SHA1: 2788 case RTE_CRYPTO_AUTH_SHA256: 2789 case RTE_CRYPTO_AUTH_SHA512: 2790 case RTE_CRYPTO_AUTH_SHA224: 2791 case RTE_CRYPTO_AUTH_SHA384: 2792 case RTE_CRYPTO_AUTH_MD5: 2793 case RTE_CRYPTO_AUTH_AES_GMAC: 2794 case RTE_CRYPTO_AUTH_KASUMI_F9: 2795 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2796 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2797 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2798 session->auth_alg); 2799 return -ENOTSUP; 2800 default: 2801 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2802 session->auth_alg); 2803 return -ENOTSUP; 2804 } 2805 cipherdata->key = (size_t)session->cipher_key.data; 2806 cipherdata->keylen = session->cipher_key.length; 2807 cipherdata->key_enc_flags = 0; 2808 cipherdata->key_type = RTA_DATA_IMM; 2809 2810 switch (session->cipher_alg) { 2811 case RTE_CRYPTO_CIPHER_AES_CBC: 2812 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2813 cipherdata->algmode = OP_ALG_AAI_CBC; 2814 break; 2815 case RTE_CRYPTO_CIPHER_3DES_CBC: 2816 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2817 cipherdata->algmode = OP_ALG_AAI_CBC; 2818 break; 2819 case RTE_CRYPTO_CIPHER_DES_CBC: 2820 cipherdata->algtype = OP_PCL_IPSEC_DES; 2821 cipherdata->algmode = OP_ALG_AAI_CBC; 2822 break; 2823 case RTE_CRYPTO_CIPHER_AES_CTR: 2824 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2825 cipherdata->algmode = OP_ALG_AAI_CTR; 2826 break; 2827 case RTE_CRYPTO_CIPHER_NULL: 2828 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2829 break; 2830 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2831 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2832 case RTE_CRYPTO_CIPHER_3DES_ECB: 2833 case RTE_CRYPTO_CIPHER_3DES_CTR: 2834 case RTE_CRYPTO_CIPHER_AES_ECB: 2835 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2836 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2837 session->cipher_alg); 2838 return -ENOTSUP; 2839 default: 2840 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2841 session->cipher_alg); 2842 return -ENOTSUP; 2843 } 2844 2845 return 0; 2846 } 2847 2848 static int 2849 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2850 struct rte_security_session_conf *conf, 2851 void *sess) 2852 { 2853 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2854 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2855 struct rte_crypto_auth_xform *auth_xform = NULL; 2856 struct rte_crypto_aead_xform *aead_xform = NULL; 2857 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2858 struct ctxt_priv *priv; 2859 struct alginfo authdata, cipherdata; 2860 int bufsize; 2861 struct sec_flow_context *flc; 2862 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2863 int ret = -1; 2864 2865 PMD_INIT_FUNC_TRACE(); 2866 2867 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2868 sizeof(struct ctxt_priv) + 2869 sizeof(struct sec_flc_desc), 2870 RTE_CACHE_LINE_SIZE); 2871 2872 if (priv == NULL) { 2873 DPAA2_SEC_ERR("No memory for priv CTXT"); 2874 return -ENOMEM; 2875 } 2876 2877 priv->fle_pool = dev_priv->fle_pool; 2878 flc = &priv->flc_desc[0].flc; 2879 2880 memset(session, 0, sizeof(dpaa2_sec_session)); 2881 2882 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2883 cipher_xform = &conf->crypto_xform->cipher; 2884 if (conf->crypto_xform->next) 2885 auth_xform = &conf->crypto_xform->next->auth; 2886 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2887 session, &cipherdata, &authdata); 2888 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2889 auth_xform = &conf->crypto_xform->auth; 2890 if (conf->crypto_xform->next) 2891 cipher_xform = &conf->crypto_xform->next->cipher; 2892 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2893 session, &cipherdata, &authdata); 2894 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2895 aead_xform = &conf->crypto_xform->aead; 2896 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2897 session, &cipherdata); 2898 authdata.keylen = 0; 2899 authdata.algtype = 0; 2900 } else { 2901 DPAA2_SEC_ERR("XFORM not specified"); 2902 ret = -EINVAL; 2903 goto out; 2904 } 2905 if (ret) { 2906 DPAA2_SEC_ERR("Failed to process xform"); 2907 goto out; 2908 } 2909 2910 session->ctxt_type = DPAA2_SEC_IPSEC; 2911 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2912 uint8_t *hdr = NULL; 2913 struct ip ip4_hdr; 2914 struct rte_ipv6_hdr ip6_hdr; 2915 struct ipsec_encap_pdb encap_pdb; 2916 2917 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2918 /* For Sec Proto only one descriptor is required. */ 2919 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2920 2921 /* copy algo specific data to PDB */ 2922 switch (cipherdata.algtype) { 2923 case OP_PCL_IPSEC_AES_CTR: 2924 encap_pdb.ctr.ctr_initial = 0x00000001; 2925 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2926 break; 2927 case OP_PCL_IPSEC_AES_GCM8: 2928 case OP_PCL_IPSEC_AES_GCM12: 2929 case OP_PCL_IPSEC_AES_GCM16: 2930 memcpy(encap_pdb.gcm.salt, 2931 (uint8_t *)&(ipsec_xform->salt), 4); 2932 break; 2933 } 2934 2935 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2936 PDBOPTS_ESP_OIHI_PDB_INL | 2937 PDBOPTS_ESP_IVSRC | 2938 PDBHMO_ESP_ENCAP_DTTL | 2939 PDBHMO_ESP_SNR; 2940 if (ipsec_xform->options.esn) 2941 encap_pdb.options |= PDBOPTS_ESP_ESN; 2942 encap_pdb.spi = ipsec_xform->spi; 2943 session->dir = DIR_ENC; 2944 if (ipsec_xform->tunnel.type == 2945 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2946 encap_pdb.ip_hdr_len = sizeof(struct ip); 2947 ip4_hdr.ip_v = IPVERSION; 2948 ip4_hdr.ip_hl = 5; 2949 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2950 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2951 ip4_hdr.ip_id = 0; 2952 ip4_hdr.ip_off = 0; 2953 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2954 ip4_hdr.ip_p = IPPROTO_ESP; 2955 ip4_hdr.ip_sum = 0; 2956 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2957 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2958 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2959 &ip4_hdr, sizeof(struct ip)); 2960 hdr = (uint8_t *)&ip4_hdr; 2961 } else if (ipsec_xform->tunnel.type == 2962 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2963 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2964 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2965 ((ipsec_xform->tunnel.ipv6.dscp << 2966 RTE_IPV6_HDR_TC_SHIFT) & 2967 RTE_IPV6_HDR_TC_MASK) | 2968 ((ipsec_xform->tunnel.ipv6.flabel << 2969 RTE_IPV6_HDR_FL_SHIFT) & 2970 RTE_IPV6_HDR_FL_MASK)); 2971 /* Payload length will be updated by HW */ 2972 ip6_hdr.payload_len = 0; 2973 ip6_hdr.hop_limits = 2974 ipsec_xform->tunnel.ipv6.hlimit; 2975 ip6_hdr.proto = (ipsec_xform->proto == 2976 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2977 IPPROTO_ESP : IPPROTO_AH; 2978 memcpy(&ip6_hdr.src_addr, 2979 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2980 memcpy(&ip6_hdr.dst_addr, 2981 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2982 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2983 hdr = (uint8_t *)&ip6_hdr; 2984 } 2985 2986 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2987 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 2988 SHR_WAIT : SHR_SERIAL, &encap_pdb, 2989 hdr, &cipherdata, &authdata); 2990 } else if (ipsec_xform->direction == 2991 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2992 struct ipsec_decap_pdb decap_pdb; 2993 2994 flc->dhr = SEC_FLC_DHR_INBOUND; 2995 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2996 /* copy algo specific data to PDB */ 2997 switch (cipherdata.algtype) { 2998 case OP_PCL_IPSEC_AES_CTR: 2999 decap_pdb.ctr.ctr_initial = 0x00000001; 3000 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3001 break; 3002 case OP_PCL_IPSEC_AES_GCM8: 3003 case OP_PCL_IPSEC_AES_GCM12: 3004 case OP_PCL_IPSEC_AES_GCM16: 3005 memcpy(decap_pdb.gcm.salt, 3006 (uint8_t *)&(ipsec_xform->salt), 4); 3007 break; 3008 } 3009 3010 decap_pdb.options = (ipsec_xform->tunnel.type == 3011 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 3012 sizeof(struct ip) << 16 : 3013 sizeof(struct rte_ipv6_hdr) << 16; 3014 if (ipsec_xform->options.esn) 3015 decap_pdb.options |= PDBOPTS_ESP_ESN; 3016 3017 if (ipsec_xform->replay_win_sz) { 3018 uint32_t win_sz; 3019 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3020 3021 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3022 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3023 win_sz = 128; 3024 } 3025 switch (win_sz) { 3026 case 1: 3027 case 2: 3028 case 4: 3029 case 8: 3030 case 16: 3031 case 32: 3032 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3033 break; 3034 case 64: 3035 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3036 break; 3037 case 256: 3038 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3039 break; 3040 case 512: 3041 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3042 break; 3043 case 1024: 3044 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3045 break; 3046 case 128: 3047 default: 3048 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3049 } 3050 } 3051 session->dir = DIR_DEC; 3052 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3053 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3054 SHR_WAIT : SHR_SERIAL, 3055 &decap_pdb, &cipherdata, &authdata); 3056 } else 3057 goto out; 3058 3059 if (bufsize < 0) { 3060 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3061 goto out; 3062 } 3063 3064 flc->word1_sdl = (uint8_t)bufsize; 3065 3066 /* Enable the stashing control bit */ 3067 DPAA2_SET_FLC_RSC(flc); 3068 flc->word2_rflc_31_0 = lower_32_bits( 3069 (size_t)&(((struct dpaa2_sec_qp *) 3070 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3071 flc->word3_rflc_63_32 = upper_32_bits( 3072 (size_t)&(((struct dpaa2_sec_qp *) 3073 dev->data->queue_pairs[0])->rx_vq)); 3074 3075 /* Set EWS bit i.e. enable write-safe */ 3076 DPAA2_SET_FLC_EWS(flc); 3077 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3078 DPAA2_SET_FLC_REUSE_BS(flc); 3079 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3080 DPAA2_SET_FLC_REUSE_FF(flc); 3081 3082 session->ctxt = priv; 3083 3084 return 0; 3085 out: 3086 rte_free(session->auth_key.data); 3087 rte_free(session->cipher_key.data); 3088 rte_free(priv); 3089 return ret; 3090 } 3091 3092 static int 3093 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3094 struct rte_security_session_conf *conf, 3095 void *sess) 3096 { 3097 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3098 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3099 struct rte_crypto_auth_xform *auth_xform = NULL; 3100 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3101 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3102 struct ctxt_priv *priv; 3103 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 3104 struct alginfo authdata, cipherdata; 3105 struct alginfo *p_authdata = NULL; 3106 int bufsize = -1; 3107 struct sec_flow_context *flc; 3108 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3109 int swap = true; 3110 #else 3111 int swap = false; 3112 #endif 3113 3114 PMD_INIT_FUNC_TRACE(); 3115 3116 memset(session, 0, sizeof(dpaa2_sec_session)); 3117 3118 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3119 sizeof(struct ctxt_priv) + 3120 sizeof(struct sec_flc_desc), 3121 RTE_CACHE_LINE_SIZE); 3122 3123 if (priv == NULL) { 3124 DPAA2_SEC_ERR("No memory for priv CTXT"); 3125 return -ENOMEM; 3126 } 3127 3128 priv->fle_pool = dev_priv->fle_pool; 3129 flc = &priv->flc_desc[0].flc; 3130 3131 /* find xfrm types */ 3132 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3133 cipher_xform = &xform->cipher; 3134 if (xform->next != NULL) { 3135 session->ext_params.aead_ctxt.auth_cipher_text = true; 3136 auth_xform = &xform->next->auth; 3137 } 3138 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3139 auth_xform = &xform->auth; 3140 if (xform->next != NULL) { 3141 session->ext_params.aead_ctxt.auth_cipher_text = false; 3142 cipher_xform = &xform->next->cipher; 3143 } 3144 } else { 3145 DPAA2_SEC_ERR("Invalid crypto type"); 3146 return -EINVAL; 3147 } 3148 3149 session->ctxt_type = DPAA2_SEC_PDCP; 3150 if (cipher_xform) { 3151 session->cipher_key.data = rte_zmalloc(NULL, 3152 cipher_xform->key.length, 3153 RTE_CACHE_LINE_SIZE); 3154 if (session->cipher_key.data == NULL && 3155 cipher_xform->key.length > 0) { 3156 DPAA2_SEC_ERR("No Memory for cipher key"); 3157 rte_free(priv); 3158 return -ENOMEM; 3159 } 3160 session->cipher_key.length = cipher_xform->key.length; 3161 memcpy(session->cipher_key.data, cipher_xform->key.data, 3162 cipher_xform->key.length); 3163 session->dir = 3164 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3165 DIR_ENC : DIR_DEC; 3166 session->cipher_alg = cipher_xform->algo; 3167 } else { 3168 session->cipher_key.data = NULL; 3169 session->cipher_key.length = 0; 3170 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3171 session->dir = DIR_ENC; 3172 } 3173 3174 session->pdcp.domain = pdcp_xform->domain; 3175 session->pdcp.bearer = pdcp_xform->bearer; 3176 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3177 session->pdcp.sn_size = pdcp_xform->sn_size; 3178 session->pdcp.hfn = pdcp_xform->hfn; 3179 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3180 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3181 /* hfv ovd offset location is stored in iv.offset value*/ 3182 if (cipher_xform) 3183 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3184 3185 cipherdata.key = (size_t)session->cipher_key.data; 3186 cipherdata.keylen = session->cipher_key.length; 3187 cipherdata.key_enc_flags = 0; 3188 cipherdata.key_type = RTA_DATA_IMM; 3189 3190 switch (session->cipher_alg) { 3191 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3192 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3193 break; 3194 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3195 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3196 break; 3197 case RTE_CRYPTO_CIPHER_AES_CTR: 3198 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3199 break; 3200 case RTE_CRYPTO_CIPHER_NULL: 3201 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3202 break; 3203 default: 3204 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3205 session->cipher_alg); 3206 goto out; 3207 } 3208 3209 if (auth_xform) { 3210 session->auth_key.data = rte_zmalloc(NULL, 3211 auth_xform->key.length, 3212 RTE_CACHE_LINE_SIZE); 3213 if (!session->auth_key.data && 3214 auth_xform->key.length > 0) { 3215 DPAA2_SEC_ERR("No Memory for auth key"); 3216 rte_free(session->cipher_key.data); 3217 rte_free(priv); 3218 return -ENOMEM; 3219 } 3220 session->auth_key.length = auth_xform->key.length; 3221 memcpy(session->auth_key.data, auth_xform->key.data, 3222 auth_xform->key.length); 3223 session->auth_alg = auth_xform->algo; 3224 } else { 3225 session->auth_key.data = NULL; 3226 session->auth_key.length = 0; 3227 session->auth_alg = 0; 3228 } 3229 authdata.key = (size_t)session->auth_key.data; 3230 authdata.keylen = session->auth_key.length; 3231 authdata.key_enc_flags = 0; 3232 authdata.key_type = RTA_DATA_IMM; 3233 3234 if (session->auth_alg) { 3235 switch (session->auth_alg) { 3236 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3237 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3238 break; 3239 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3240 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3241 break; 3242 case RTE_CRYPTO_AUTH_AES_CMAC: 3243 authdata.algtype = PDCP_AUTH_TYPE_AES; 3244 break; 3245 case RTE_CRYPTO_AUTH_NULL: 3246 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3247 break; 3248 default: 3249 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3250 session->auth_alg); 3251 goto out; 3252 } 3253 3254 p_authdata = &authdata; 3255 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3256 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); 3257 goto out; 3258 } 3259 3260 if (pdcp_xform->sdap_enabled) { 3261 int nb_keys_to_inline = 3262 rta_inline_pdcp_sdap_query(authdata.algtype, 3263 cipherdata.algtype, 3264 session->pdcp.sn_size, 3265 session->pdcp.hfn_ovd); 3266 if (nb_keys_to_inline >= 1) { 3267 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3268 cipherdata.key_type = RTA_DATA_PTR; 3269 } 3270 if (nb_keys_to_inline >= 2) { 3271 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 3272 authdata.key_type = RTA_DATA_PTR; 3273 } 3274 } else { 3275 if (rta_inline_pdcp_query(authdata.algtype, 3276 cipherdata.algtype, 3277 session->pdcp.sn_size, 3278 session->pdcp.hfn_ovd)) { 3279 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3280 cipherdata.key_type = RTA_DATA_PTR; 3281 } 3282 } 3283 3284 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3285 if (session->dir == DIR_ENC) 3286 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3287 priv->flc_desc[0].desc, 1, swap, 3288 pdcp_xform->hfn, 3289 session->pdcp.sn_size, 3290 pdcp_xform->bearer, 3291 pdcp_xform->pkt_dir, 3292 pdcp_xform->hfn_threshold, 3293 &cipherdata, &authdata, 3294 0); 3295 else if (session->dir == DIR_DEC) 3296 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3297 priv->flc_desc[0].desc, 1, swap, 3298 pdcp_xform->hfn, 3299 session->pdcp.sn_size, 3300 pdcp_xform->bearer, 3301 pdcp_xform->pkt_dir, 3302 pdcp_xform->hfn_threshold, 3303 &cipherdata, &authdata, 3304 0); 3305 3306 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 3307 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc, 3308 1, swap, &authdata); 3309 } else { 3310 if (session->dir == DIR_ENC) { 3311 if (pdcp_xform->sdap_enabled) 3312 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap( 3313 priv->flc_desc[0].desc, 1, swap, 3314 session->pdcp.sn_size, 3315 pdcp_xform->hfn, 3316 pdcp_xform->bearer, 3317 pdcp_xform->pkt_dir, 3318 pdcp_xform->hfn_threshold, 3319 &cipherdata, p_authdata, 0); 3320 else 3321 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3322 priv->flc_desc[0].desc, 1, swap, 3323 session->pdcp.sn_size, 3324 pdcp_xform->hfn, 3325 pdcp_xform->bearer, 3326 pdcp_xform->pkt_dir, 3327 pdcp_xform->hfn_threshold, 3328 &cipherdata, p_authdata, 0); 3329 } else if (session->dir == DIR_DEC) { 3330 if (pdcp_xform->sdap_enabled) 3331 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap( 3332 priv->flc_desc[0].desc, 1, swap, 3333 session->pdcp.sn_size, 3334 pdcp_xform->hfn, 3335 pdcp_xform->bearer, 3336 pdcp_xform->pkt_dir, 3337 pdcp_xform->hfn_threshold, 3338 &cipherdata, p_authdata, 0); 3339 else 3340 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3341 priv->flc_desc[0].desc, 1, swap, 3342 session->pdcp.sn_size, 3343 pdcp_xform->hfn, 3344 pdcp_xform->bearer, 3345 pdcp_xform->pkt_dir, 3346 pdcp_xform->hfn_threshold, 3347 &cipherdata, p_authdata, 0); 3348 } 3349 } 3350 3351 if (bufsize < 0) { 3352 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3353 goto out; 3354 } 3355 3356 /* Enable the stashing control bit */ 3357 DPAA2_SET_FLC_RSC(flc); 3358 flc->word2_rflc_31_0 = lower_32_bits( 3359 (size_t)&(((struct dpaa2_sec_qp *) 3360 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3361 flc->word3_rflc_63_32 = upper_32_bits( 3362 (size_t)&(((struct dpaa2_sec_qp *) 3363 dev->data->queue_pairs[0])->rx_vq)); 3364 3365 flc->word1_sdl = (uint8_t)bufsize; 3366 3367 /* TODO - check the perf impact or 3368 * align as per descriptor type 3369 * Set EWS bit i.e. enable write-safe 3370 * DPAA2_SET_FLC_EWS(flc); 3371 */ 3372 3373 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3374 DPAA2_SET_FLC_REUSE_BS(flc); 3375 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3376 DPAA2_SET_FLC_REUSE_FF(flc); 3377 3378 session->ctxt = priv; 3379 3380 return 0; 3381 out: 3382 rte_free(session->auth_key.data); 3383 rte_free(session->cipher_key.data); 3384 rte_free(priv); 3385 return -EINVAL; 3386 } 3387 3388 static int 3389 dpaa2_sec_security_session_create(void *dev, 3390 struct rte_security_session_conf *conf, 3391 struct rte_security_session *sess, 3392 struct rte_mempool *mempool) 3393 { 3394 void *sess_private_data; 3395 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3396 int ret; 3397 3398 if (rte_mempool_get(mempool, &sess_private_data)) { 3399 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3400 return -ENOMEM; 3401 } 3402 3403 switch (conf->protocol) { 3404 case RTE_SECURITY_PROTOCOL_IPSEC: 3405 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3406 sess_private_data); 3407 break; 3408 case RTE_SECURITY_PROTOCOL_MACSEC: 3409 return -ENOTSUP; 3410 case RTE_SECURITY_PROTOCOL_PDCP: 3411 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3412 sess_private_data); 3413 break; 3414 default: 3415 return -EINVAL; 3416 } 3417 if (ret != 0) { 3418 DPAA2_SEC_ERR("Failed to configure session parameters"); 3419 /* Return session to mempool */ 3420 rte_mempool_put(mempool, sess_private_data); 3421 return ret; 3422 } 3423 3424 set_sec_session_private_data(sess, sess_private_data); 3425 3426 return ret; 3427 } 3428 3429 /** Clear the memory of session so it doesn't leave key material behind */ 3430 static int 3431 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3432 struct rte_security_session *sess) 3433 { 3434 PMD_INIT_FUNC_TRACE(); 3435 void *sess_priv = get_sec_session_private_data(sess); 3436 3437 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3438 3439 if (sess_priv) { 3440 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3441 3442 rte_free(s->ctxt); 3443 rte_free(s->cipher_key.data); 3444 rte_free(s->auth_key.data); 3445 memset(s, 0, sizeof(dpaa2_sec_session)); 3446 set_sec_session_private_data(sess, NULL); 3447 rte_mempool_put(sess_mp, sess_priv); 3448 } 3449 return 0; 3450 } 3451 #endif 3452 static int 3453 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3454 struct rte_crypto_sym_xform *xform, 3455 struct rte_cryptodev_sym_session *sess, 3456 struct rte_mempool *mempool) 3457 { 3458 void *sess_private_data; 3459 int ret; 3460 3461 if (rte_mempool_get(mempool, &sess_private_data)) { 3462 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3463 return -ENOMEM; 3464 } 3465 3466 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3467 if (ret != 0) { 3468 DPAA2_SEC_ERR("Failed to configure session parameters"); 3469 /* Return session to mempool */ 3470 rte_mempool_put(mempool, sess_private_data); 3471 return ret; 3472 } 3473 3474 set_sym_session_private_data(sess, dev->driver_id, 3475 sess_private_data); 3476 3477 return 0; 3478 } 3479 3480 /** Clear the memory of session so it doesn't leave key material behind */ 3481 static void 3482 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3483 struct rte_cryptodev_sym_session *sess) 3484 { 3485 PMD_INIT_FUNC_TRACE(); 3486 uint8_t index = dev->driver_id; 3487 void *sess_priv = get_sym_session_private_data(sess, index); 3488 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3489 3490 if (sess_priv) { 3491 rte_free(s->ctxt); 3492 rte_free(s->cipher_key.data); 3493 rte_free(s->auth_key.data); 3494 memset(s, 0, sizeof(dpaa2_sec_session)); 3495 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3496 set_sym_session_private_data(sess, index, NULL); 3497 rte_mempool_put(sess_mp, sess_priv); 3498 } 3499 } 3500 3501 static int 3502 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3503 struct rte_cryptodev_config *config __rte_unused) 3504 { 3505 PMD_INIT_FUNC_TRACE(); 3506 3507 return 0; 3508 } 3509 3510 static int 3511 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3512 { 3513 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3514 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3515 struct dpseci_attr attr; 3516 struct dpaa2_queue *dpaa2_q; 3517 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3518 dev->data->queue_pairs; 3519 struct dpseci_rx_queue_attr rx_attr; 3520 struct dpseci_tx_queue_attr tx_attr; 3521 int ret, i; 3522 3523 PMD_INIT_FUNC_TRACE(); 3524 3525 memset(&attr, 0, sizeof(struct dpseci_attr)); 3526 3527 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3528 if (ret) { 3529 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3530 priv->hw_id); 3531 goto get_attr_failure; 3532 } 3533 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3534 if (ret) { 3535 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3536 goto get_attr_failure; 3537 } 3538 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3539 dpaa2_q = &qp[i]->rx_vq; 3540 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3541 &rx_attr); 3542 dpaa2_q->fqid = rx_attr.fqid; 3543 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3544 } 3545 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3546 dpaa2_q = &qp[i]->tx_vq; 3547 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3548 &tx_attr); 3549 dpaa2_q->fqid = tx_attr.fqid; 3550 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3551 } 3552 3553 return 0; 3554 get_attr_failure: 3555 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3556 return -1; 3557 } 3558 3559 static void 3560 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3561 { 3562 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3563 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3564 int ret; 3565 3566 PMD_INIT_FUNC_TRACE(); 3567 3568 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3569 if (ret) { 3570 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3571 priv->hw_id); 3572 return; 3573 } 3574 3575 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3576 if (ret < 0) { 3577 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3578 return; 3579 } 3580 } 3581 3582 static int 3583 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused) 3584 { 3585 PMD_INIT_FUNC_TRACE(); 3586 3587 return 0; 3588 } 3589 3590 static void 3591 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3592 struct rte_cryptodev_info *info) 3593 { 3594 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3595 3596 PMD_INIT_FUNC_TRACE(); 3597 if (info != NULL) { 3598 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3599 info->feature_flags = dev->feature_flags; 3600 info->capabilities = dpaa2_sec_capabilities; 3601 /* No limit of number of sessions */ 3602 info->sym.max_nb_sessions = 0; 3603 info->driver_id = cryptodev_driver_id; 3604 } 3605 } 3606 3607 static 3608 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3609 struct rte_cryptodev_stats *stats) 3610 { 3611 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3612 struct fsl_mc_io dpseci; 3613 struct dpseci_sec_counters counters = {0}; 3614 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3615 dev->data->queue_pairs; 3616 int ret, i; 3617 3618 PMD_INIT_FUNC_TRACE(); 3619 if (stats == NULL) { 3620 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3621 return; 3622 } 3623 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3624 if (qp == NULL || qp[i] == NULL) { 3625 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3626 continue; 3627 } 3628 3629 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3630 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3631 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3632 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3633 } 3634 3635 /* In case as secondary process access stats, MCP portal in priv-hw 3636 * may have primary process address. Need the secondary process 3637 * based MCP portal address for this object. 3638 */ 3639 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3640 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3641 &counters); 3642 if (ret) { 3643 DPAA2_SEC_ERR("SEC counters failed"); 3644 } else { 3645 DPAA2_SEC_INFO("dpseci hardware stats:" 3646 "\n\tNum of Requests Dequeued = %" PRIu64 3647 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3648 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3649 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3650 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3651 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3652 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3653 counters.dequeued_requests, 3654 counters.ob_enc_requests, 3655 counters.ib_dec_requests, 3656 counters.ob_enc_bytes, 3657 counters.ob_prot_bytes, 3658 counters.ib_dec_bytes, 3659 counters.ib_valid_bytes); 3660 } 3661 } 3662 3663 static 3664 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3665 { 3666 int i; 3667 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3668 (dev->data->queue_pairs); 3669 3670 PMD_INIT_FUNC_TRACE(); 3671 3672 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3673 if (qp[i] == NULL) { 3674 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3675 continue; 3676 } 3677 qp[i]->tx_vq.rx_pkts = 0; 3678 qp[i]->tx_vq.tx_pkts = 0; 3679 qp[i]->tx_vq.err_pkts = 0; 3680 qp[i]->rx_vq.rx_pkts = 0; 3681 qp[i]->rx_vq.tx_pkts = 0; 3682 qp[i]->rx_vq.err_pkts = 0; 3683 } 3684 } 3685 3686 static void __rte_hot 3687 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3688 const struct qbman_fd *fd, 3689 const struct qbman_result *dq, 3690 struct dpaa2_queue *rxq, 3691 struct rte_event *ev) 3692 { 3693 /* Prefetching mbuf */ 3694 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3695 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3696 3697 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3698 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3699 3700 ev->flow_id = rxq->ev.flow_id; 3701 ev->sub_event_type = rxq->ev.sub_event_type; 3702 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3703 ev->op = RTE_EVENT_OP_NEW; 3704 ev->sched_type = rxq->ev.sched_type; 3705 ev->queue_id = rxq->ev.queue_id; 3706 ev->priority = rxq->ev.priority; 3707 ev->event_ptr = sec_fd_to_mbuf(fd); 3708 3709 qbman_swp_dqrr_consume(swp, dq); 3710 } 3711 static void 3712 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 3713 const struct qbman_fd *fd, 3714 const struct qbman_result *dq, 3715 struct dpaa2_queue *rxq, 3716 struct rte_event *ev) 3717 { 3718 uint8_t dqrr_index; 3719 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3720 /* Prefetching mbuf */ 3721 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3722 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3723 3724 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3725 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3726 3727 ev->flow_id = rxq->ev.flow_id; 3728 ev->sub_event_type = rxq->ev.sub_event_type; 3729 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3730 ev->op = RTE_EVENT_OP_NEW; 3731 ev->sched_type = rxq->ev.sched_type; 3732 ev->queue_id = rxq->ev.queue_id; 3733 ev->priority = rxq->ev.priority; 3734 3735 ev->event_ptr = sec_fd_to_mbuf(fd); 3736 dqrr_index = qbman_get_dqrr_idx(dq); 3737 *dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1; 3738 DPAA2_PER_LCORE_DQRR_SIZE++; 3739 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3740 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3741 } 3742 3743 int 3744 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3745 int qp_id, 3746 struct dpaa2_dpcon_dev *dpcon, 3747 const struct rte_event *event) 3748 { 3749 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3750 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3751 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3752 struct dpseci_rx_queue_cfg cfg; 3753 uint8_t priority; 3754 int ret; 3755 3756 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3757 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3758 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3759 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3760 else 3761 return -EINVAL; 3762 3763 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 3764 (dpcon->num_priorities - 1); 3765 3766 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3767 cfg.options = DPSECI_QUEUE_OPT_DEST; 3768 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3769 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 3770 cfg.dest_cfg.priority = priority; 3771 3772 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3773 cfg.user_ctx = (size_t)(qp); 3774 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3775 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3776 cfg.order_preservation_en = 1; 3777 } 3778 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3779 qp_id, &cfg); 3780 if (ret) { 3781 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3782 return ret; 3783 } 3784 3785 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3786 3787 return 0; 3788 } 3789 3790 int 3791 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3792 int qp_id) 3793 { 3794 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3795 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3796 struct dpseci_rx_queue_cfg cfg; 3797 int ret; 3798 3799 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3800 cfg.options = DPSECI_QUEUE_OPT_DEST; 3801 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3802 3803 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3804 qp_id, &cfg); 3805 if (ret) 3806 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3807 3808 return ret; 3809 } 3810 3811 static struct rte_cryptodev_ops crypto_ops = { 3812 .dev_configure = dpaa2_sec_dev_configure, 3813 .dev_start = dpaa2_sec_dev_start, 3814 .dev_stop = dpaa2_sec_dev_stop, 3815 .dev_close = dpaa2_sec_dev_close, 3816 .dev_infos_get = dpaa2_sec_dev_infos_get, 3817 .stats_get = dpaa2_sec_stats_get, 3818 .stats_reset = dpaa2_sec_stats_reset, 3819 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3820 .queue_pair_release = dpaa2_sec_queue_pair_release, 3821 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3822 .sym_session_configure = dpaa2_sec_sym_session_configure, 3823 .sym_session_clear = dpaa2_sec_sym_session_clear, 3824 /* Raw data-path API related operations */ 3825 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size, 3826 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx, 3827 }; 3828 3829 #ifdef RTE_LIB_SECURITY 3830 static const struct rte_security_capability * 3831 dpaa2_sec_capabilities_get(void *device __rte_unused) 3832 { 3833 return dpaa2_sec_security_cap; 3834 } 3835 3836 static const struct rte_security_ops dpaa2_sec_security_ops = { 3837 .session_create = dpaa2_sec_security_session_create, 3838 .session_update = NULL, 3839 .session_stats_get = NULL, 3840 .session_destroy = dpaa2_sec_security_session_destroy, 3841 .set_pkt_metadata = NULL, 3842 .capabilities_get = dpaa2_sec_capabilities_get 3843 }; 3844 #endif 3845 3846 static int 3847 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3848 { 3849 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3850 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3851 int ret; 3852 3853 PMD_INIT_FUNC_TRACE(); 3854 3855 /* Function is reverse of dpaa2_sec_dev_init. 3856 * It does the following: 3857 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3858 * 2. Close the DPSECI device 3859 * 3. Free the allocated resources. 3860 */ 3861 3862 /*Close the device at underlying layer*/ 3863 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3864 if (ret) { 3865 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3866 return -1; 3867 } 3868 3869 /*Free the allocated memory for ethernet private data and dpseci*/ 3870 priv->hw = NULL; 3871 rte_free(dpseci); 3872 rte_free(dev->security_ctx); 3873 rte_mempool_free(priv->fle_pool); 3874 3875 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3876 dev->data->name, rte_socket_id()); 3877 3878 return 0; 3879 } 3880 3881 static int 3882 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3883 { 3884 struct dpaa2_sec_dev_private *internals; 3885 struct rte_device *dev = cryptodev->device; 3886 struct rte_dpaa2_device *dpaa2_dev; 3887 #ifdef RTE_LIB_SECURITY 3888 struct rte_security_ctx *security_instance; 3889 #endif 3890 struct fsl_mc_io *dpseci; 3891 uint16_t token; 3892 struct dpseci_attr attr; 3893 int retcode, hw_id; 3894 char str[30]; 3895 3896 PMD_INIT_FUNC_TRACE(); 3897 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3898 hw_id = dpaa2_dev->object_id; 3899 3900 cryptodev->driver_id = cryptodev_driver_id; 3901 cryptodev->dev_ops = &crypto_ops; 3902 3903 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 3904 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 3905 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3906 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3907 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3908 RTE_CRYPTODEV_FF_SECURITY | 3909 RTE_CRYPTODEV_FF_SYM_RAW_DP | 3910 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3911 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3912 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3913 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3914 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3915 3916 internals = cryptodev->data->dev_private; 3917 3918 /* 3919 * For secondary processes, we don't initialise any further as primary 3920 * has already done this work. Only check we don't need a different 3921 * RX function 3922 */ 3923 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3924 DPAA2_SEC_DEBUG("Device already init by primary process"); 3925 return 0; 3926 } 3927 #ifdef RTE_LIB_SECURITY 3928 /* Initialize security_ctx only for primary process*/ 3929 security_instance = rte_malloc("rte_security_instances_ops", 3930 sizeof(struct rte_security_ctx), 0); 3931 if (security_instance == NULL) 3932 return -ENOMEM; 3933 security_instance->device = (void *)cryptodev; 3934 security_instance->ops = &dpaa2_sec_security_ops; 3935 security_instance->sess_cnt = 0; 3936 cryptodev->security_ctx = security_instance; 3937 #endif 3938 /*Open the rte device via MC and save the handle for further use*/ 3939 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 3940 sizeof(struct fsl_mc_io), 0); 3941 if (!dpseci) { 3942 DPAA2_SEC_ERR( 3943 "Error in allocating the memory for dpsec object"); 3944 return -ENOMEM; 3945 } 3946 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3947 3948 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 3949 if (retcode != 0) { 3950 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 3951 retcode); 3952 goto init_error; 3953 } 3954 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 3955 if (retcode != 0) { 3956 DPAA2_SEC_ERR( 3957 "Cannot get dpsec device attributed: Error = %x", 3958 retcode); 3959 goto init_error; 3960 } 3961 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 3962 "dpsec-%u", hw_id); 3963 3964 internals->max_nb_queue_pairs = attr.num_tx_queues; 3965 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 3966 internals->hw = dpseci; 3967 internals->token = token; 3968 3969 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d", 3970 getpid(), cryptodev->data->dev_id); 3971 internals->fle_pool = rte_mempool_create((const char *)str, 3972 FLE_POOL_NUM_BUFS, 3973 FLE_POOL_BUF_SIZE, 3974 FLE_POOL_CACHE_SIZE, 0, 3975 NULL, NULL, NULL, NULL, 3976 SOCKET_ID_ANY, 0); 3977 if (!internals->fle_pool) { 3978 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 3979 goto init_error; 3980 } 3981 3982 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3983 return 0; 3984 3985 init_error: 3986 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3987 3988 /* dpaa2_sec_uninit(crypto_dev_name); */ 3989 return -EFAULT; 3990 } 3991 3992 static int 3993 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 3994 struct rte_dpaa2_device *dpaa2_dev) 3995 { 3996 struct rte_cryptodev *cryptodev; 3997 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3998 3999 int retval; 4000 4001 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 4002 dpaa2_dev->object_id); 4003 4004 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 4005 if (cryptodev == NULL) 4006 return -ENOMEM; 4007 4008 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4009 cryptodev->data->dev_private = rte_zmalloc_socket( 4010 "cryptodev private structure", 4011 sizeof(struct dpaa2_sec_dev_private), 4012 RTE_CACHE_LINE_SIZE, 4013 rte_socket_id()); 4014 4015 if (cryptodev->data->dev_private == NULL) 4016 rte_panic("Cannot allocate memzone for private " 4017 "device data"); 4018 } 4019 4020 dpaa2_dev->cryptodev = cryptodev; 4021 cryptodev->device = &dpaa2_dev->device; 4022 4023 /* init user callbacks */ 4024 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 4025 4026 if (dpaa2_svr_family == SVR_LX2160A) 4027 rta_set_sec_era(RTA_SEC_ERA_10); 4028 else 4029 rta_set_sec_era(RTA_SEC_ERA_8); 4030 4031 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era()); 4032 4033 /* Invoke PMD device initialization function */ 4034 retval = dpaa2_sec_dev_init(cryptodev); 4035 if (retval == 0) { 4036 rte_cryptodev_pmd_probing_finish(cryptodev); 4037 return 0; 4038 } 4039 4040 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4041 rte_free(cryptodev->data->dev_private); 4042 4043 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 4044 4045 return -ENXIO; 4046 } 4047 4048 static int 4049 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 4050 { 4051 struct rte_cryptodev *cryptodev; 4052 int ret; 4053 4054 cryptodev = dpaa2_dev->cryptodev; 4055 if (cryptodev == NULL) 4056 return -ENODEV; 4057 4058 ret = dpaa2_sec_uninit(cryptodev); 4059 if (ret) 4060 return ret; 4061 4062 return rte_cryptodev_pmd_destroy(cryptodev); 4063 } 4064 4065 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 4066 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 4067 .drv_type = DPAA2_CRYPTO, 4068 .driver = { 4069 .name = "DPAA2 SEC PMD" 4070 }, 4071 .probe = cryptodev_dpaa2_sec_probe, 4072 .remove = cryptodev_dpaa2_sec_remove, 4073 }; 4074 4075 static struct cryptodev_driver dpaa2_sec_crypto_drv; 4076 4077 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 4078 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 4079 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 4080 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE); 4081