1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2021 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_mbuf.h> 14 #include <rte_cryptodev.h> 15 #include <rte_malloc.h> 16 #include <rte_memcpy.h> 17 #include <rte_string_fns.h> 18 #include <rte_cycles.h> 19 #include <rte_kvargs.h> 20 #include <rte_dev.h> 21 #include <cryptodev_pmd.h> 22 #include <rte_common.h> 23 #include <rte_fslmc.h> 24 #include <fslmc_vfio.h> 25 #include <dpaa2_hw_pvt.h> 26 #include <dpaa2_hw_dpio.h> 27 #include <dpaa2_hw_mempool.h> 28 #include <fsl_dpopr.h> 29 #include <fsl_dpseci.h> 30 #include <fsl_mc_sys.h> 31 32 #include "dpaa2_sec_priv.h" 33 #include "dpaa2_sec_event.h" 34 #include "dpaa2_sec_logs.h" 35 36 /* RTA header files */ 37 #include <desc/ipsec.h> 38 #include <desc/pdcp.h> 39 #include <desc/sdap.h> 40 #include <desc/algo.h> 41 42 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 43 * a pointer to the shared descriptor 44 */ 45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 46 #define FSL_VENDOR_ID 0x1957 47 #define FSL_DEVICE_ID 0x410 48 #define FSL_SUBSYSTEM_SEC 1 49 #define FSL_MC_DPSECI_DEVID 3 50 51 #define NO_PREFETCH 0 52 53 uint8_t cryptodev_driver_id; 54 55 #ifdef RTE_LIB_SECURITY 56 static inline int 57 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 58 struct rte_crypto_op *op, 59 struct qbman_fd *fd, uint16_t bpid) 60 { 61 struct rte_crypto_sym_op *sym_op = op->sym; 62 struct ctxt_priv *priv = sess->ctxt; 63 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 64 struct sec_flow_context *flc; 65 struct rte_mbuf *mbuf; 66 uint32_t in_len = 0, out_len = 0; 67 68 if (sym_op->m_dst) 69 mbuf = sym_op->m_dst; 70 else 71 mbuf = sym_op->m_src; 72 73 /* first FLE entry used to store mbuf and session ctxt */ 74 fle = (struct qbman_fle *)rte_malloc(NULL, 75 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 76 RTE_CACHE_LINE_SIZE); 77 if (unlikely(!fle)) { 78 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 79 return -ENOMEM; 80 } 81 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 82 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 83 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 84 85 /* Save the shared descriptor */ 86 flc = &priv->flc_desc[0].flc; 87 88 op_fle = fle + 1; 89 ip_fle = fle + 2; 90 sge = fle + 3; 91 92 if (likely(bpid < MAX_BPID)) { 93 DPAA2_SET_FD_BPID(fd, bpid); 94 DPAA2_SET_FLE_BPID(op_fle, bpid); 95 DPAA2_SET_FLE_BPID(ip_fle, bpid); 96 } else { 97 DPAA2_SET_FD_IVP(fd); 98 DPAA2_SET_FLE_IVP(op_fle); 99 DPAA2_SET_FLE_IVP(ip_fle); 100 } 101 102 /* Configure FD as a FRAME LIST */ 103 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 104 DPAA2_SET_FD_COMPOUND_FMT(fd); 105 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 106 107 /* Configure Output FLE with Scatter/Gather Entry */ 108 DPAA2_SET_FLE_SG_EXT(op_fle); 109 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 110 111 /* Configure Output SGE for Encap/Decap */ 112 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 113 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 114 /* o/p segs */ 115 while (mbuf->next) { 116 sge->length = mbuf->data_len; 117 out_len += sge->length; 118 sge++; 119 mbuf = mbuf->next; 120 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 121 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 122 } 123 /* using buf_len for last buf - so that extra data can be added */ 124 sge->length = mbuf->buf_len - mbuf->data_off; 125 out_len += sge->length; 126 127 DPAA2_SET_FLE_FIN(sge); 128 op_fle->length = out_len; 129 130 sge++; 131 mbuf = sym_op->m_src; 132 133 /* Configure Input FLE with Scatter/Gather Entry */ 134 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 135 DPAA2_SET_FLE_SG_EXT(ip_fle); 136 DPAA2_SET_FLE_FIN(ip_fle); 137 138 /* Configure input SGE for Encap/Decap */ 139 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 140 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 141 sge->length = mbuf->data_len; 142 in_len += sge->length; 143 144 mbuf = mbuf->next; 145 /* i/p segs */ 146 while (mbuf) { 147 sge++; 148 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 149 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 150 sge->length = mbuf->data_len; 151 in_len += sge->length; 152 mbuf = mbuf->next; 153 } 154 ip_fle->length = in_len; 155 DPAA2_SET_FLE_FIN(sge); 156 157 /* In case of PDCP, per packet HFN is stored in 158 * mbuf priv after sym_op. 159 */ 160 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 161 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 162 sess->pdcp.hfn_ovd_offset); 163 /*enable HFN override override */ 164 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 165 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 166 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 167 } 168 DPAA2_SET_FD_LEN(fd, ip_fle->length); 169 170 return 0; 171 } 172 173 static inline int 174 build_proto_compound_fd(dpaa2_sec_session *sess, 175 struct rte_crypto_op *op, 176 struct qbman_fd *fd, uint16_t bpid) 177 { 178 struct rte_crypto_sym_op *sym_op = op->sym; 179 struct ctxt_priv *priv = sess->ctxt; 180 struct qbman_fle *fle, *ip_fle, *op_fle; 181 struct sec_flow_context *flc; 182 struct rte_mbuf *src_mbuf = sym_op->m_src; 183 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 184 int retval; 185 186 if (!dst_mbuf) 187 dst_mbuf = src_mbuf; 188 189 /* Save the shared descriptor */ 190 flc = &priv->flc_desc[0].flc; 191 192 /* we are using the first FLE entry to store Mbuf */ 193 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 194 if (retval) { 195 DPAA2_SEC_DP_ERR("Memory alloc failed"); 196 return -ENOMEM; 197 } 198 memset(fle, 0, FLE_POOL_BUF_SIZE); 199 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 200 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 201 202 op_fle = fle + 1; 203 ip_fle = fle + 2; 204 205 if (likely(bpid < MAX_BPID)) { 206 DPAA2_SET_FD_BPID(fd, bpid); 207 DPAA2_SET_FLE_BPID(op_fle, bpid); 208 DPAA2_SET_FLE_BPID(ip_fle, bpid); 209 } else { 210 DPAA2_SET_FD_IVP(fd); 211 DPAA2_SET_FLE_IVP(op_fle); 212 DPAA2_SET_FLE_IVP(ip_fle); 213 } 214 215 /* Configure FD as a FRAME LIST */ 216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 217 DPAA2_SET_FD_COMPOUND_FMT(fd); 218 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 219 220 /* Configure Output FLE with dst mbuf data */ 221 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 222 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 223 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 224 225 /* Configure Input FLE with src mbuf data */ 226 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 227 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 228 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 229 230 DPAA2_SET_FD_LEN(fd, ip_fle->length); 231 DPAA2_SET_FLE_FIN(ip_fle); 232 233 /* In case of PDCP, per packet HFN is stored in 234 * mbuf priv after sym_op. 235 */ 236 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 237 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 238 sess->pdcp.hfn_ovd_offset); 239 /*enable HFN override override */ 240 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 241 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 242 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 243 } 244 245 return 0; 246 247 } 248 249 static inline int 250 build_proto_fd(dpaa2_sec_session *sess, 251 struct rte_crypto_op *op, 252 struct qbman_fd *fd, uint16_t bpid) 253 { 254 struct rte_crypto_sym_op *sym_op = op->sym; 255 if (sym_op->m_dst) 256 return build_proto_compound_fd(sess, op, fd, bpid); 257 258 struct ctxt_priv *priv = sess->ctxt; 259 struct sec_flow_context *flc; 260 struct rte_mbuf *mbuf = sym_op->m_src; 261 262 if (likely(bpid < MAX_BPID)) 263 DPAA2_SET_FD_BPID(fd, bpid); 264 else 265 DPAA2_SET_FD_IVP(fd); 266 267 /* Save the shared descriptor */ 268 flc = &priv->flc_desc[0].flc; 269 270 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 271 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 272 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 273 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 274 275 /* save physical address of mbuf */ 276 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 277 mbuf->buf_iova = (size_t)op; 278 279 return 0; 280 } 281 #endif 282 283 static inline int 284 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 285 struct rte_crypto_op *op, 286 struct qbman_fd *fd, __rte_unused uint16_t bpid) 287 { 288 struct rte_crypto_sym_op *sym_op = op->sym; 289 struct ctxt_priv *priv = sess->ctxt; 290 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 291 struct sec_flow_context *flc; 292 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 293 int icv_len = sess->digest_length; 294 uint8_t *old_icv; 295 struct rte_mbuf *mbuf; 296 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 297 sess->iv.offset); 298 299 if (sym_op->m_dst) 300 mbuf = sym_op->m_dst; 301 else 302 mbuf = sym_op->m_src; 303 304 /* first FLE entry used to store mbuf and session ctxt */ 305 fle = (struct qbman_fle *)rte_malloc(NULL, 306 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 307 RTE_CACHE_LINE_SIZE); 308 if (unlikely(!fle)) { 309 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 310 return -ENOMEM; 311 } 312 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 313 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 314 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 315 316 op_fle = fle + 1; 317 ip_fle = fle + 2; 318 sge = fle + 3; 319 320 /* Save the shared descriptor */ 321 flc = &priv->flc_desc[0].flc; 322 323 /* Configure FD as a FRAME LIST */ 324 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 325 DPAA2_SET_FD_COMPOUND_FMT(fd); 326 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 327 328 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 329 "iv-len=%d data_off: 0x%x\n", 330 sym_op->aead.data.offset, 331 sym_op->aead.data.length, 332 sess->digest_length, 333 sess->iv.length, 334 sym_op->m_src->data_off); 335 336 /* Configure Output FLE with Scatter/Gather Entry */ 337 DPAA2_SET_FLE_SG_EXT(op_fle); 338 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 339 340 if (auth_only_len) 341 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 342 343 op_fle->length = (sess->dir == DIR_ENC) ? 344 (sym_op->aead.data.length + icv_len) : 345 sym_op->aead.data.length; 346 347 /* Configure Output SGE for Encap/Decap */ 348 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 349 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset); 350 sge->length = mbuf->data_len - sym_op->aead.data.offset; 351 352 mbuf = mbuf->next; 353 /* o/p segs */ 354 while (mbuf) { 355 sge++; 356 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 357 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 358 sge->length = mbuf->data_len; 359 mbuf = mbuf->next; 360 } 361 sge->length -= icv_len; 362 363 if (sess->dir == DIR_ENC) { 364 sge++; 365 DPAA2_SET_FLE_ADDR(sge, 366 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 367 sge->length = icv_len; 368 } 369 DPAA2_SET_FLE_FIN(sge); 370 371 sge++; 372 mbuf = sym_op->m_src; 373 374 /* Configure Input FLE with Scatter/Gather Entry */ 375 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 376 DPAA2_SET_FLE_SG_EXT(ip_fle); 377 DPAA2_SET_FLE_FIN(ip_fle); 378 ip_fle->length = (sess->dir == DIR_ENC) ? 379 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 380 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 381 icv_len); 382 383 /* Configure Input SGE for Encap/Decap */ 384 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 385 sge->length = sess->iv.length; 386 387 sge++; 388 if (auth_only_len) { 389 DPAA2_SET_FLE_ADDR(sge, 390 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 391 sge->length = auth_only_len; 392 sge++; 393 } 394 395 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 396 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 397 mbuf->data_off); 398 sge->length = mbuf->data_len - sym_op->aead.data.offset; 399 400 mbuf = mbuf->next; 401 /* i/p segs */ 402 while (mbuf) { 403 sge++; 404 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 405 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 406 sge->length = mbuf->data_len; 407 mbuf = mbuf->next; 408 } 409 410 if (sess->dir == DIR_DEC) { 411 sge++; 412 old_icv = (uint8_t *)(sge + 1); 413 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 414 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 415 sge->length = icv_len; 416 } 417 418 DPAA2_SET_FLE_FIN(sge); 419 if (auth_only_len) { 420 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 421 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 422 } 423 DPAA2_SET_FD_LEN(fd, ip_fle->length); 424 425 return 0; 426 } 427 428 static inline int 429 build_authenc_gcm_fd(dpaa2_sec_session *sess, 430 struct rte_crypto_op *op, 431 struct qbman_fd *fd, uint16_t bpid) 432 { 433 struct rte_crypto_sym_op *sym_op = op->sym; 434 struct ctxt_priv *priv = sess->ctxt; 435 struct qbman_fle *fle, *sge; 436 struct sec_flow_context *flc; 437 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 438 int icv_len = sess->digest_length, retval; 439 uint8_t *old_icv; 440 struct rte_mbuf *dst; 441 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 442 sess->iv.offset); 443 444 if (sym_op->m_dst) 445 dst = sym_op->m_dst; 446 else 447 dst = sym_op->m_src; 448 449 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 450 * Currently we donot know which FLE has the mbuf stored. 451 * So while retreiving we can go back 1 FLE from the FD -ADDR 452 * to get the MBUF Addr from the previous FLE. 453 * We can have a better approach to use the inline Mbuf 454 */ 455 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 456 if (retval) { 457 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 458 return -ENOMEM; 459 } 460 memset(fle, 0, FLE_POOL_BUF_SIZE); 461 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 462 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 463 fle = fle + 1; 464 sge = fle + 2; 465 if (likely(bpid < MAX_BPID)) { 466 DPAA2_SET_FD_BPID(fd, bpid); 467 DPAA2_SET_FLE_BPID(fle, bpid); 468 DPAA2_SET_FLE_BPID(fle + 1, bpid); 469 DPAA2_SET_FLE_BPID(sge, bpid); 470 DPAA2_SET_FLE_BPID(sge + 1, bpid); 471 DPAA2_SET_FLE_BPID(sge + 2, bpid); 472 DPAA2_SET_FLE_BPID(sge + 3, bpid); 473 } else { 474 DPAA2_SET_FD_IVP(fd); 475 DPAA2_SET_FLE_IVP(fle); 476 DPAA2_SET_FLE_IVP((fle + 1)); 477 DPAA2_SET_FLE_IVP(sge); 478 DPAA2_SET_FLE_IVP((sge + 1)); 479 DPAA2_SET_FLE_IVP((sge + 2)); 480 DPAA2_SET_FLE_IVP((sge + 3)); 481 } 482 483 /* Save the shared descriptor */ 484 flc = &priv->flc_desc[0].flc; 485 /* Configure FD as a FRAME LIST */ 486 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 487 DPAA2_SET_FD_COMPOUND_FMT(fd); 488 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 489 490 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 491 "iv-len=%d data_off: 0x%x\n", 492 sym_op->aead.data.offset, 493 sym_op->aead.data.length, 494 sess->digest_length, 495 sess->iv.length, 496 sym_op->m_src->data_off); 497 498 /* Configure Output FLE with Scatter/Gather Entry */ 499 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 500 if (auth_only_len) 501 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 502 fle->length = (sess->dir == DIR_ENC) ? 503 (sym_op->aead.data.length + icv_len) : 504 sym_op->aead.data.length; 505 506 DPAA2_SET_FLE_SG_EXT(fle); 507 508 /* Configure Output SGE for Encap/Decap */ 509 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 510 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset); 511 sge->length = sym_op->aead.data.length; 512 513 if (sess->dir == DIR_ENC) { 514 sge++; 515 DPAA2_SET_FLE_ADDR(sge, 516 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 517 sge->length = sess->digest_length; 518 } 519 DPAA2_SET_FLE_FIN(sge); 520 521 sge++; 522 fle++; 523 524 /* Configure Input FLE with Scatter/Gather Entry */ 525 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 526 DPAA2_SET_FLE_SG_EXT(fle); 527 DPAA2_SET_FLE_FIN(fle); 528 fle->length = (sess->dir == DIR_ENC) ? 529 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 530 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 531 sess->digest_length); 532 533 /* Configure Input SGE for Encap/Decap */ 534 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 535 sge->length = sess->iv.length; 536 sge++; 537 if (auth_only_len) { 538 DPAA2_SET_FLE_ADDR(sge, 539 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 540 sge->length = auth_only_len; 541 DPAA2_SET_FLE_BPID(sge, bpid); 542 sge++; 543 } 544 545 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 546 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 547 sym_op->m_src->data_off); 548 sge->length = sym_op->aead.data.length; 549 if (sess->dir == DIR_DEC) { 550 sge++; 551 old_icv = (uint8_t *)(sge + 1); 552 memcpy(old_icv, sym_op->aead.digest.data, 553 sess->digest_length); 554 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 555 sge->length = sess->digest_length; 556 } 557 DPAA2_SET_FLE_FIN(sge); 558 559 if (auth_only_len) { 560 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 561 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 562 } 563 564 DPAA2_SET_FD_LEN(fd, fle->length); 565 return 0; 566 } 567 568 static inline int 569 build_authenc_sg_fd(dpaa2_sec_session *sess, 570 struct rte_crypto_op *op, 571 struct qbman_fd *fd, __rte_unused uint16_t bpid) 572 { 573 struct rte_crypto_sym_op *sym_op = op->sym; 574 struct ctxt_priv *priv = sess->ctxt; 575 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 576 struct sec_flow_context *flc; 577 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 578 sym_op->auth.data.offset; 579 uint16_t auth_tail_len = sym_op->auth.data.length - 580 sym_op->cipher.data.length - auth_hdr_len; 581 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 582 int icv_len = sess->digest_length; 583 uint8_t *old_icv; 584 struct rte_mbuf *mbuf; 585 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 586 sess->iv.offset); 587 588 if (sym_op->m_dst) 589 mbuf = sym_op->m_dst; 590 else 591 mbuf = sym_op->m_src; 592 593 /* first FLE entry used to store mbuf and session ctxt */ 594 fle = (struct qbman_fle *)rte_malloc(NULL, 595 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 596 RTE_CACHE_LINE_SIZE); 597 if (unlikely(!fle)) { 598 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 599 return -ENOMEM; 600 } 601 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 602 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 603 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 604 605 op_fle = fle + 1; 606 ip_fle = fle + 2; 607 sge = fle + 3; 608 609 /* Save the shared descriptor */ 610 flc = &priv->flc_desc[0].flc; 611 612 /* Configure FD as a FRAME LIST */ 613 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 614 DPAA2_SET_FD_COMPOUND_FMT(fd); 615 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 616 617 DPAA2_SEC_DP_DEBUG( 618 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 619 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 620 sym_op->auth.data.offset, 621 sym_op->auth.data.length, 622 sess->digest_length, 623 sym_op->cipher.data.offset, 624 sym_op->cipher.data.length, 625 sess->iv.length, 626 sym_op->m_src->data_off); 627 628 /* Configure Output FLE with Scatter/Gather Entry */ 629 DPAA2_SET_FLE_SG_EXT(op_fle); 630 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 631 632 if (auth_only_len) 633 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 634 635 op_fle->length = (sess->dir == DIR_ENC) ? 636 (sym_op->cipher.data.length + icv_len) : 637 sym_op->cipher.data.length; 638 639 /* Configure Output SGE for Encap/Decap */ 640 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 641 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 642 sge->length = mbuf->data_len - sym_op->auth.data.offset; 643 644 mbuf = mbuf->next; 645 /* o/p segs */ 646 while (mbuf) { 647 sge++; 648 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 649 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 650 sge->length = mbuf->data_len; 651 mbuf = mbuf->next; 652 } 653 sge->length -= icv_len; 654 655 if (sess->dir == DIR_ENC) { 656 sge++; 657 DPAA2_SET_FLE_ADDR(sge, 658 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 659 sge->length = icv_len; 660 } 661 DPAA2_SET_FLE_FIN(sge); 662 663 sge++; 664 mbuf = sym_op->m_src; 665 666 /* Configure Input FLE with Scatter/Gather Entry */ 667 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 668 DPAA2_SET_FLE_SG_EXT(ip_fle); 669 DPAA2_SET_FLE_FIN(ip_fle); 670 ip_fle->length = (sess->dir == DIR_ENC) ? 671 (sym_op->auth.data.length + sess->iv.length) : 672 (sym_op->auth.data.length + sess->iv.length + 673 icv_len); 674 675 /* Configure Input SGE for Encap/Decap */ 676 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 677 sge->length = sess->iv.length; 678 679 sge++; 680 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 681 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 682 mbuf->data_off); 683 sge->length = mbuf->data_len - sym_op->auth.data.offset; 684 685 mbuf = mbuf->next; 686 /* i/p segs */ 687 while (mbuf) { 688 sge++; 689 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 690 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 691 sge->length = mbuf->data_len; 692 mbuf = mbuf->next; 693 } 694 sge->length -= icv_len; 695 696 if (sess->dir == DIR_DEC) { 697 sge++; 698 old_icv = (uint8_t *)(sge + 1); 699 memcpy(old_icv, sym_op->auth.digest.data, 700 icv_len); 701 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 702 sge->length = icv_len; 703 } 704 705 DPAA2_SET_FLE_FIN(sge); 706 if (auth_only_len) { 707 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 708 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 709 } 710 DPAA2_SET_FD_LEN(fd, ip_fle->length); 711 712 return 0; 713 } 714 715 static inline int 716 build_authenc_fd(dpaa2_sec_session *sess, 717 struct rte_crypto_op *op, 718 struct qbman_fd *fd, uint16_t bpid) 719 { 720 struct rte_crypto_sym_op *sym_op = op->sym; 721 struct ctxt_priv *priv = sess->ctxt; 722 struct qbman_fle *fle, *sge; 723 struct sec_flow_context *flc; 724 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 725 sym_op->auth.data.offset; 726 uint16_t auth_tail_len = sym_op->auth.data.length - 727 sym_op->cipher.data.length - auth_hdr_len; 728 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 729 730 int icv_len = sess->digest_length, retval; 731 uint8_t *old_icv; 732 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 733 sess->iv.offset); 734 struct rte_mbuf *dst; 735 736 if (sym_op->m_dst) 737 dst = sym_op->m_dst; 738 else 739 dst = sym_op->m_src; 740 741 /* we are using the first FLE entry to store Mbuf. 742 * Currently we donot know which FLE has the mbuf stored. 743 * So while retreiving we can go back 1 FLE from the FD -ADDR 744 * to get the MBUF Addr from the previous FLE. 745 * We can have a better approach to use the inline Mbuf 746 */ 747 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 748 if (retval) { 749 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 750 return -ENOMEM; 751 } 752 memset(fle, 0, FLE_POOL_BUF_SIZE); 753 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 754 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 755 fle = fle + 1; 756 sge = fle + 2; 757 if (likely(bpid < MAX_BPID)) { 758 DPAA2_SET_FD_BPID(fd, bpid); 759 DPAA2_SET_FLE_BPID(fle, bpid); 760 DPAA2_SET_FLE_BPID(fle + 1, bpid); 761 DPAA2_SET_FLE_BPID(sge, bpid); 762 DPAA2_SET_FLE_BPID(sge + 1, bpid); 763 DPAA2_SET_FLE_BPID(sge + 2, bpid); 764 DPAA2_SET_FLE_BPID(sge + 3, bpid); 765 } else { 766 DPAA2_SET_FD_IVP(fd); 767 DPAA2_SET_FLE_IVP(fle); 768 DPAA2_SET_FLE_IVP((fle + 1)); 769 DPAA2_SET_FLE_IVP(sge); 770 DPAA2_SET_FLE_IVP((sge + 1)); 771 DPAA2_SET_FLE_IVP((sge + 2)); 772 DPAA2_SET_FLE_IVP((sge + 3)); 773 } 774 775 /* Save the shared descriptor */ 776 flc = &priv->flc_desc[0].flc; 777 /* Configure FD as a FRAME LIST */ 778 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 779 DPAA2_SET_FD_COMPOUND_FMT(fd); 780 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 781 782 DPAA2_SEC_DP_DEBUG( 783 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 784 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 785 sym_op->auth.data.offset, 786 sym_op->auth.data.length, 787 sess->digest_length, 788 sym_op->cipher.data.offset, 789 sym_op->cipher.data.length, 790 sess->iv.length, 791 sym_op->m_src->data_off); 792 793 /* Configure Output FLE with Scatter/Gather Entry */ 794 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 795 if (auth_only_len) 796 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 797 fle->length = (sess->dir == DIR_ENC) ? 798 (sym_op->cipher.data.length + icv_len) : 799 sym_op->cipher.data.length; 800 801 DPAA2_SET_FLE_SG_EXT(fle); 802 803 /* Configure Output SGE for Encap/Decap */ 804 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 805 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 806 dst->data_off); 807 sge->length = sym_op->cipher.data.length; 808 809 if (sess->dir == DIR_ENC) { 810 sge++; 811 DPAA2_SET_FLE_ADDR(sge, 812 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 813 sge->length = sess->digest_length; 814 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 815 sess->iv.length)); 816 } 817 DPAA2_SET_FLE_FIN(sge); 818 819 sge++; 820 fle++; 821 822 /* Configure Input FLE with Scatter/Gather Entry */ 823 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 824 DPAA2_SET_FLE_SG_EXT(fle); 825 DPAA2_SET_FLE_FIN(fle); 826 fle->length = (sess->dir == DIR_ENC) ? 827 (sym_op->auth.data.length + sess->iv.length) : 828 (sym_op->auth.data.length + sess->iv.length + 829 sess->digest_length); 830 831 /* Configure Input SGE for Encap/Decap */ 832 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 833 sge->length = sess->iv.length; 834 sge++; 835 836 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 837 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 838 sym_op->m_src->data_off); 839 sge->length = sym_op->auth.data.length; 840 if (sess->dir == DIR_DEC) { 841 sge++; 842 old_icv = (uint8_t *)(sge + 1); 843 memcpy(old_icv, sym_op->auth.digest.data, 844 sess->digest_length); 845 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 846 sge->length = sess->digest_length; 847 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 848 sess->digest_length + 849 sess->iv.length)); 850 } 851 DPAA2_SET_FLE_FIN(sge); 852 if (auth_only_len) { 853 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 854 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 855 } 856 return 0; 857 } 858 859 static inline int build_auth_sg_fd( 860 dpaa2_sec_session *sess, 861 struct rte_crypto_op *op, 862 struct qbman_fd *fd, 863 __rte_unused uint16_t bpid) 864 { 865 struct rte_crypto_sym_op *sym_op = op->sym; 866 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 867 struct sec_flow_context *flc; 868 struct ctxt_priv *priv = sess->ctxt; 869 int data_len, data_offset; 870 uint8_t *old_digest; 871 struct rte_mbuf *mbuf; 872 873 data_len = sym_op->auth.data.length; 874 data_offset = sym_op->auth.data.offset; 875 876 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 877 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 878 if ((data_len & 7) || (data_offset & 7)) { 879 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 880 return -ENOTSUP; 881 } 882 883 data_len = data_len >> 3; 884 data_offset = data_offset >> 3; 885 } 886 887 mbuf = sym_op->m_src; 888 fle = (struct qbman_fle *)rte_malloc(NULL, 889 FLE_SG_MEM_SIZE(mbuf->nb_segs), 890 RTE_CACHE_LINE_SIZE); 891 if (unlikely(!fle)) { 892 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 893 return -ENOMEM; 894 } 895 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 896 /* first FLE entry used to store mbuf and session ctxt */ 897 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 898 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 899 op_fle = fle + 1; 900 ip_fle = fle + 2; 901 sge = fle + 3; 902 903 flc = &priv->flc_desc[DESC_INITFINAL].flc; 904 /* sg FD */ 905 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 906 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 907 DPAA2_SET_FD_COMPOUND_FMT(fd); 908 909 /* o/p fle */ 910 DPAA2_SET_FLE_ADDR(op_fle, 911 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 912 op_fle->length = sess->digest_length; 913 914 /* i/p fle */ 915 DPAA2_SET_FLE_SG_EXT(ip_fle); 916 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 917 ip_fle->length = data_len; 918 919 if (sess->iv.length) { 920 uint8_t *iv_ptr; 921 922 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 923 sess->iv.offset); 924 925 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 926 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 927 sge->length = 12; 928 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 929 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 930 sge->length = 8; 931 } else { 932 sge->length = sess->iv.length; 933 } 934 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 935 ip_fle->length += sge->length; 936 sge++; 937 } 938 /* i/p 1st seg */ 939 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 940 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 941 942 if (data_len <= (mbuf->data_len - data_offset)) { 943 sge->length = data_len; 944 data_len = 0; 945 } else { 946 sge->length = mbuf->data_len - data_offset; 947 948 /* remaining i/p segs */ 949 while ((data_len = data_len - sge->length) && 950 (mbuf = mbuf->next)) { 951 sge++; 952 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 953 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 954 if (data_len > mbuf->data_len) 955 sge->length = mbuf->data_len; 956 else 957 sge->length = data_len; 958 } 959 } 960 961 if (sess->dir == DIR_DEC) { 962 /* Digest verification case */ 963 sge++; 964 old_digest = (uint8_t *)(sge + 1); 965 rte_memcpy(old_digest, sym_op->auth.digest.data, 966 sess->digest_length); 967 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 968 sge->length = sess->digest_length; 969 ip_fle->length += sess->digest_length; 970 } 971 DPAA2_SET_FLE_FIN(sge); 972 DPAA2_SET_FLE_FIN(ip_fle); 973 DPAA2_SET_FD_LEN(fd, ip_fle->length); 974 975 return 0; 976 } 977 978 static inline int 979 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 980 struct qbman_fd *fd, uint16_t bpid) 981 { 982 struct rte_crypto_sym_op *sym_op = op->sym; 983 struct qbman_fle *fle, *sge; 984 struct sec_flow_context *flc; 985 struct ctxt_priv *priv = sess->ctxt; 986 int data_len, data_offset; 987 uint8_t *old_digest; 988 int retval; 989 990 data_len = sym_op->auth.data.length; 991 data_offset = sym_op->auth.data.offset; 992 993 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 994 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 995 if ((data_len & 7) || (data_offset & 7)) { 996 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 997 return -ENOTSUP; 998 } 999 1000 data_len = data_len >> 3; 1001 data_offset = data_offset >> 3; 1002 } 1003 1004 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1005 if (retval) { 1006 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 1007 return -ENOMEM; 1008 } 1009 memset(fle, 0, FLE_POOL_BUF_SIZE); 1010 /* TODO we are using the first FLE entry to store Mbuf. 1011 * Currently we donot know which FLE has the mbuf stored. 1012 * So while retreiving we can go back 1 FLE from the FD -ADDR 1013 * to get the MBUF Addr from the previous FLE. 1014 * We can have a better approach to use the inline Mbuf 1015 */ 1016 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1017 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1018 fle = fle + 1; 1019 sge = fle + 2; 1020 1021 if (likely(bpid < MAX_BPID)) { 1022 DPAA2_SET_FD_BPID(fd, bpid); 1023 DPAA2_SET_FLE_BPID(fle, bpid); 1024 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1025 DPAA2_SET_FLE_BPID(sge, bpid); 1026 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1027 } else { 1028 DPAA2_SET_FD_IVP(fd); 1029 DPAA2_SET_FLE_IVP(fle); 1030 DPAA2_SET_FLE_IVP((fle + 1)); 1031 DPAA2_SET_FLE_IVP(sge); 1032 DPAA2_SET_FLE_IVP((sge + 1)); 1033 } 1034 1035 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1036 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1037 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1038 DPAA2_SET_FD_COMPOUND_FMT(fd); 1039 1040 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1041 fle->length = sess->digest_length; 1042 fle++; 1043 1044 /* Setting input FLE */ 1045 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1046 DPAA2_SET_FLE_SG_EXT(fle); 1047 fle->length = data_len; 1048 1049 if (sess->iv.length) { 1050 uint8_t *iv_ptr; 1051 1052 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1053 sess->iv.offset); 1054 1055 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1056 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1057 sge->length = 12; 1058 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1059 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1060 sge->length = 8; 1061 } else { 1062 sge->length = sess->iv.length; 1063 } 1064 1065 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1066 fle->length = fle->length + sge->length; 1067 sge++; 1068 } 1069 1070 /* Setting data to authenticate */ 1071 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1072 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1073 sge->length = data_len; 1074 1075 if (sess->dir == DIR_DEC) { 1076 sge++; 1077 old_digest = (uint8_t *)(sge + 1); 1078 rte_memcpy(old_digest, sym_op->auth.digest.data, 1079 sess->digest_length); 1080 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1081 sge->length = sess->digest_length; 1082 fle->length = fle->length + sess->digest_length; 1083 } 1084 1085 DPAA2_SET_FLE_FIN(sge); 1086 DPAA2_SET_FLE_FIN(fle); 1087 DPAA2_SET_FD_LEN(fd, fle->length); 1088 1089 return 0; 1090 } 1091 1092 static int 1093 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1094 struct qbman_fd *fd, __rte_unused uint16_t bpid) 1095 { 1096 struct rte_crypto_sym_op *sym_op = op->sym; 1097 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1098 int data_len, data_offset; 1099 struct sec_flow_context *flc; 1100 struct ctxt_priv *priv = sess->ctxt; 1101 struct rte_mbuf *mbuf; 1102 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1103 sess->iv.offset); 1104 1105 data_len = sym_op->cipher.data.length; 1106 data_offset = sym_op->cipher.data.offset; 1107 1108 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1109 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1110 if ((data_len & 7) || (data_offset & 7)) { 1111 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1112 return -ENOTSUP; 1113 } 1114 1115 data_len = data_len >> 3; 1116 data_offset = data_offset >> 3; 1117 } 1118 1119 if (sym_op->m_dst) 1120 mbuf = sym_op->m_dst; 1121 else 1122 mbuf = sym_op->m_src; 1123 1124 /* first FLE entry used to store mbuf and session ctxt */ 1125 fle = (struct qbman_fle *)rte_malloc(NULL, 1126 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1127 RTE_CACHE_LINE_SIZE); 1128 if (!fle) { 1129 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1130 return -ENOMEM; 1131 } 1132 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1133 /* first FLE entry used to store mbuf and session ctxt */ 1134 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1135 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1136 1137 op_fle = fle + 1; 1138 ip_fle = fle + 2; 1139 sge = fle + 3; 1140 1141 flc = &priv->flc_desc[0].flc; 1142 1143 DPAA2_SEC_DP_DEBUG( 1144 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1145 " data_off: 0x%x\n", 1146 data_offset, 1147 data_len, 1148 sess->iv.length, 1149 sym_op->m_src->data_off); 1150 1151 /* o/p fle */ 1152 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1153 op_fle->length = data_len; 1154 DPAA2_SET_FLE_SG_EXT(op_fle); 1155 1156 /* o/p 1st seg */ 1157 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1158 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1159 sge->length = mbuf->data_len - data_offset; 1160 1161 mbuf = mbuf->next; 1162 /* o/p segs */ 1163 while (mbuf) { 1164 sge++; 1165 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1166 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1167 sge->length = mbuf->data_len; 1168 mbuf = mbuf->next; 1169 } 1170 DPAA2_SET_FLE_FIN(sge); 1171 1172 DPAA2_SEC_DP_DEBUG( 1173 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 1174 flc, fle, fle->addr_hi, fle->addr_lo, 1175 fle->length); 1176 1177 /* i/p fle */ 1178 mbuf = sym_op->m_src; 1179 sge++; 1180 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1181 ip_fle->length = sess->iv.length + data_len; 1182 DPAA2_SET_FLE_SG_EXT(ip_fle); 1183 1184 /* i/p IV */ 1185 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1186 DPAA2_SET_FLE_OFFSET(sge, 0); 1187 sge->length = sess->iv.length; 1188 1189 sge++; 1190 1191 /* i/p 1st seg */ 1192 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1193 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1194 sge->length = mbuf->data_len - data_offset; 1195 1196 mbuf = mbuf->next; 1197 /* i/p segs */ 1198 while (mbuf) { 1199 sge++; 1200 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1201 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1202 sge->length = mbuf->data_len; 1203 mbuf = mbuf->next; 1204 } 1205 DPAA2_SET_FLE_FIN(sge); 1206 DPAA2_SET_FLE_FIN(ip_fle); 1207 1208 /* sg fd */ 1209 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1210 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1211 DPAA2_SET_FD_COMPOUND_FMT(fd); 1212 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1213 1214 DPAA2_SEC_DP_DEBUG( 1215 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1216 " off =%d, len =%d\n", 1217 DPAA2_GET_FD_ADDR(fd), 1218 DPAA2_GET_FD_BPID(fd), 1219 rte_dpaa2_bpid_info[bpid].meta_data_size, 1220 DPAA2_GET_FD_OFFSET(fd), 1221 DPAA2_GET_FD_LEN(fd)); 1222 return 0; 1223 } 1224 1225 static int 1226 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1227 struct qbman_fd *fd, uint16_t bpid) 1228 { 1229 struct rte_crypto_sym_op *sym_op = op->sym; 1230 struct qbman_fle *fle, *sge; 1231 int retval, data_len, data_offset; 1232 struct sec_flow_context *flc; 1233 struct ctxt_priv *priv = sess->ctxt; 1234 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1235 sess->iv.offset); 1236 struct rte_mbuf *dst; 1237 1238 data_len = sym_op->cipher.data.length; 1239 data_offset = sym_op->cipher.data.offset; 1240 1241 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1242 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1243 if ((data_len & 7) || (data_offset & 7)) { 1244 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1245 return -ENOTSUP; 1246 } 1247 1248 data_len = data_len >> 3; 1249 data_offset = data_offset >> 3; 1250 } 1251 1252 if (sym_op->m_dst) 1253 dst = sym_op->m_dst; 1254 else 1255 dst = sym_op->m_src; 1256 1257 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1258 if (retval) { 1259 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1260 return -ENOMEM; 1261 } 1262 memset(fle, 0, FLE_POOL_BUF_SIZE); 1263 /* TODO we are using the first FLE entry to store Mbuf. 1264 * Currently we donot know which FLE has the mbuf stored. 1265 * So while retreiving we can go back 1 FLE from the FD -ADDR 1266 * to get the MBUF Addr from the previous FLE. 1267 * We can have a better approach to use the inline Mbuf 1268 */ 1269 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1270 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1271 fle = fle + 1; 1272 sge = fle + 2; 1273 1274 if (likely(bpid < MAX_BPID)) { 1275 DPAA2_SET_FD_BPID(fd, bpid); 1276 DPAA2_SET_FLE_BPID(fle, bpid); 1277 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1278 DPAA2_SET_FLE_BPID(sge, bpid); 1279 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1280 } else { 1281 DPAA2_SET_FD_IVP(fd); 1282 DPAA2_SET_FLE_IVP(fle); 1283 DPAA2_SET_FLE_IVP((fle + 1)); 1284 DPAA2_SET_FLE_IVP(sge); 1285 DPAA2_SET_FLE_IVP((sge + 1)); 1286 } 1287 1288 flc = &priv->flc_desc[0].flc; 1289 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1290 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1291 DPAA2_SET_FD_COMPOUND_FMT(fd); 1292 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1293 1294 DPAA2_SEC_DP_DEBUG( 1295 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1296 " data_off: 0x%x\n", 1297 data_offset, 1298 data_len, 1299 sess->iv.length, 1300 sym_op->m_src->data_off); 1301 1302 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1303 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off); 1304 1305 fle->length = data_len + sess->iv.length; 1306 1307 DPAA2_SEC_DP_DEBUG( 1308 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1309 flc, fle, fle->addr_hi, fle->addr_lo, 1310 fle->length); 1311 1312 fle++; 1313 1314 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1315 fle->length = data_len + sess->iv.length; 1316 1317 DPAA2_SET_FLE_SG_EXT(fle); 1318 1319 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1320 sge->length = sess->iv.length; 1321 1322 sge++; 1323 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1324 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1325 1326 sge->length = data_len; 1327 DPAA2_SET_FLE_FIN(sge); 1328 DPAA2_SET_FLE_FIN(fle); 1329 1330 DPAA2_SEC_DP_DEBUG( 1331 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1332 " off =%d, len =%d\n", 1333 DPAA2_GET_FD_ADDR(fd), 1334 DPAA2_GET_FD_BPID(fd), 1335 rte_dpaa2_bpid_info[bpid].meta_data_size, 1336 DPAA2_GET_FD_OFFSET(fd), 1337 DPAA2_GET_FD_LEN(fd)); 1338 1339 return 0; 1340 } 1341 1342 static inline int 1343 build_sec_fd(struct rte_crypto_op *op, 1344 struct qbman_fd *fd, uint16_t bpid) 1345 { 1346 int ret = -1; 1347 dpaa2_sec_session *sess; 1348 1349 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1350 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1351 op->sym->session, cryptodev_driver_id); 1352 #ifdef RTE_LIB_SECURITY 1353 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1354 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1355 op->sym->sec_session); 1356 #endif 1357 else 1358 return -ENOTSUP; 1359 1360 if (!sess) 1361 return -EINVAL; 1362 1363 /* Any of the buffer is segmented*/ 1364 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1365 ((op->sym->m_dst != NULL) && 1366 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1367 switch (sess->ctxt_type) { 1368 case DPAA2_SEC_CIPHER: 1369 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1370 break; 1371 case DPAA2_SEC_AUTH: 1372 ret = build_auth_sg_fd(sess, op, fd, bpid); 1373 break; 1374 case DPAA2_SEC_AEAD: 1375 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1376 break; 1377 case DPAA2_SEC_CIPHER_HASH: 1378 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1379 break; 1380 #ifdef RTE_LIB_SECURITY 1381 case DPAA2_SEC_IPSEC: 1382 case DPAA2_SEC_PDCP: 1383 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1384 break; 1385 #endif 1386 case DPAA2_SEC_HASH_CIPHER: 1387 default: 1388 DPAA2_SEC_ERR("error: Unsupported session"); 1389 } 1390 } else { 1391 switch (sess->ctxt_type) { 1392 case DPAA2_SEC_CIPHER: 1393 ret = build_cipher_fd(sess, op, fd, bpid); 1394 break; 1395 case DPAA2_SEC_AUTH: 1396 ret = build_auth_fd(sess, op, fd, bpid); 1397 break; 1398 case DPAA2_SEC_AEAD: 1399 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1400 break; 1401 case DPAA2_SEC_CIPHER_HASH: 1402 ret = build_authenc_fd(sess, op, fd, bpid); 1403 break; 1404 #ifdef RTE_LIB_SECURITY 1405 case DPAA2_SEC_IPSEC: 1406 ret = build_proto_fd(sess, op, fd, bpid); 1407 break; 1408 case DPAA2_SEC_PDCP: 1409 ret = build_proto_compound_fd(sess, op, fd, bpid); 1410 break; 1411 #endif 1412 case DPAA2_SEC_HASH_CIPHER: 1413 default: 1414 DPAA2_SEC_ERR("error: Unsupported session"); 1415 ret = -ENOTSUP; 1416 } 1417 } 1418 return ret; 1419 } 1420 1421 static uint16_t 1422 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1423 uint16_t nb_ops) 1424 { 1425 /* Function to transmit the frames to given device and VQ*/ 1426 uint32_t loop; 1427 int32_t ret; 1428 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1429 uint32_t frames_to_send, retry_count; 1430 struct qbman_eq_desc eqdesc; 1431 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1432 struct qbman_swp *swp; 1433 uint16_t num_tx = 0; 1434 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1435 /*todo - need to support multiple buffer pools */ 1436 uint16_t bpid; 1437 struct rte_mempool *mb_pool; 1438 1439 if (unlikely(nb_ops == 0)) 1440 return 0; 1441 1442 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1443 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1444 return 0; 1445 } 1446 /*Prepare enqueue descriptor*/ 1447 qbman_eq_desc_clear(&eqdesc); 1448 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1449 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1450 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1451 1452 if (!DPAA2_PER_LCORE_DPIO) { 1453 ret = dpaa2_affine_qbman_swp(); 1454 if (ret) { 1455 DPAA2_SEC_ERR( 1456 "Failed to allocate IO portal, tid: %d\n", 1457 rte_gettid()); 1458 return 0; 1459 } 1460 } 1461 swp = DPAA2_PER_LCORE_PORTAL; 1462 1463 while (nb_ops) { 1464 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1465 dpaa2_eqcr_size : nb_ops; 1466 1467 for (loop = 0; loop < frames_to_send; loop++) { 1468 if (*dpaa2_seqn((*ops)->sym->m_src)) { 1469 uint8_t dqrr_index = 1470 *dpaa2_seqn((*ops)->sym->m_src) - 1; 1471 1472 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1473 DPAA2_PER_LCORE_DQRR_SIZE--; 1474 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1475 *dpaa2_seqn((*ops)->sym->m_src) = 1476 DPAA2_INVALID_MBUF_SEQN; 1477 } 1478 1479 /*Clear the unused FD fields before sending*/ 1480 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1481 mb_pool = (*ops)->sym->m_src->pool; 1482 bpid = mempool_to_bpid(mb_pool); 1483 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1484 if (ret) { 1485 DPAA2_SEC_ERR("error: Improper packet contents" 1486 " for crypto operation"); 1487 goto skip_tx; 1488 } 1489 ops++; 1490 } 1491 1492 loop = 0; 1493 retry_count = 0; 1494 while (loop < frames_to_send) { 1495 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1496 &fd_arr[loop], 1497 &flags[loop], 1498 frames_to_send - loop); 1499 if (unlikely(ret < 0)) { 1500 retry_count++; 1501 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1502 num_tx += loop; 1503 nb_ops -= loop; 1504 goto skip_tx; 1505 } 1506 } else { 1507 loop += ret; 1508 retry_count = 0; 1509 } 1510 } 1511 1512 num_tx += loop; 1513 nb_ops -= loop; 1514 } 1515 skip_tx: 1516 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1517 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1518 return num_tx; 1519 } 1520 1521 #ifdef RTE_LIB_SECURITY 1522 static inline struct rte_crypto_op * 1523 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1524 { 1525 struct rte_crypto_op *op; 1526 uint16_t len = DPAA2_GET_FD_LEN(fd); 1527 int16_t diff = 0; 1528 dpaa2_sec_session *sess_priv __rte_unused; 1529 1530 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1531 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1532 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1533 1534 diff = len - mbuf->pkt_len; 1535 mbuf->pkt_len += diff; 1536 mbuf->data_len += diff; 1537 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1538 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1539 op->sym->aead.digest.phys_addr = 0L; 1540 1541 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1542 op->sym->sec_session); 1543 if (sess_priv->dir == DIR_ENC) 1544 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1545 else 1546 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1547 1548 return op; 1549 } 1550 #endif 1551 1552 static inline struct rte_crypto_op * 1553 sec_fd_to_mbuf(const struct qbman_fd *fd) 1554 { 1555 struct qbman_fle *fle; 1556 struct rte_crypto_op *op; 1557 struct ctxt_priv *priv; 1558 struct rte_mbuf *dst, *src; 1559 1560 #ifdef RTE_LIB_SECURITY 1561 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1562 return sec_simple_fd_to_mbuf(fd); 1563 #endif 1564 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1565 1566 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1567 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1568 1569 /* we are using the first FLE entry to store Mbuf. 1570 * Currently we donot know which FLE has the mbuf stored. 1571 * So while retreiving we can go back 1 FLE from the FD -ADDR 1572 * to get the MBUF Addr from the previous FLE. 1573 * We can have a better approach to use the inline Mbuf 1574 */ 1575 1576 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1577 /* TODO complete it. */ 1578 DPAA2_SEC_ERR("error: non inline buffer"); 1579 return NULL; 1580 } 1581 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1582 1583 /* Prefeth op */ 1584 src = op->sym->m_src; 1585 rte_prefetch0(src); 1586 1587 if (op->sym->m_dst) { 1588 dst = op->sym->m_dst; 1589 rte_prefetch0(dst); 1590 } else 1591 dst = src; 1592 1593 #ifdef RTE_LIB_SECURITY 1594 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1595 uint16_t len = DPAA2_GET_FD_LEN(fd); 1596 dst->pkt_len = len; 1597 while (dst->next != NULL) { 1598 len -= dst->data_len; 1599 dst = dst->next; 1600 } 1601 dst->data_len = len; 1602 } 1603 #endif 1604 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1605 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1606 (void *)dst, 1607 dst->buf_addr, 1608 DPAA2_GET_FD_ADDR(fd), 1609 DPAA2_GET_FD_BPID(fd), 1610 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1611 DPAA2_GET_FD_OFFSET(fd), 1612 DPAA2_GET_FD_LEN(fd)); 1613 1614 /* free the fle memory */ 1615 if (likely(rte_pktmbuf_is_contiguous(src))) { 1616 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1617 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1618 } else 1619 rte_free((void *)(fle-1)); 1620 1621 return op; 1622 } 1623 1624 static uint16_t 1625 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1626 uint16_t nb_ops) 1627 { 1628 /* Function is responsible to receive frames for a given device and VQ*/ 1629 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1630 struct qbman_result *dq_storage; 1631 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1632 int ret, num_rx = 0; 1633 uint8_t is_last = 0, status; 1634 struct qbman_swp *swp; 1635 const struct qbman_fd *fd; 1636 struct qbman_pull_desc pulldesc; 1637 1638 if (!DPAA2_PER_LCORE_DPIO) { 1639 ret = dpaa2_affine_qbman_swp(); 1640 if (ret) { 1641 DPAA2_SEC_ERR( 1642 "Failed to allocate IO portal, tid: %d\n", 1643 rte_gettid()); 1644 return 0; 1645 } 1646 } 1647 swp = DPAA2_PER_LCORE_PORTAL; 1648 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1649 1650 qbman_pull_desc_clear(&pulldesc); 1651 qbman_pull_desc_set_numframes(&pulldesc, 1652 (nb_ops > dpaa2_dqrr_size) ? 1653 dpaa2_dqrr_size : nb_ops); 1654 qbman_pull_desc_set_fq(&pulldesc, fqid); 1655 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1656 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1657 1); 1658 1659 /*Issue a volatile dequeue command. */ 1660 while (1) { 1661 if (qbman_swp_pull(swp, &pulldesc)) { 1662 DPAA2_SEC_WARN( 1663 "SEC VDQ command is not issued : QBMAN busy"); 1664 /* Portal was busy, try again */ 1665 continue; 1666 } 1667 break; 1668 }; 1669 1670 /* Receive the packets till Last Dequeue entry is found with 1671 * respect to the above issues PULL command. 1672 */ 1673 while (!is_last) { 1674 /* Check if the previous issued command is completed. 1675 * Also seems like the SWP is shared between the Ethernet Driver 1676 * and the SEC driver. 1677 */ 1678 while (!qbman_check_command_complete(dq_storage)) 1679 ; 1680 1681 /* Loop until the dq_storage is updated with 1682 * new token by QBMAN 1683 */ 1684 while (!qbman_check_new_result(dq_storage)) 1685 ; 1686 /* Check whether Last Pull command is Expired and 1687 * setting Condition for Loop termination 1688 */ 1689 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1690 is_last = 1; 1691 /* Check for valid frame. */ 1692 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1693 if (unlikely( 1694 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1695 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1696 continue; 1697 } 1698 } 1699 1700 fd = qbman_result_DQ_fd(dq_storage); 1701 ops[num_rx] = sec_fd_to_mbuf(fd); 1702 1703 if (unlikely(fd->simple.frc)) { 1704 /* TODO Parse SEC errors */ 1705 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n", 1706 fd->simple.frc); 1707 dpaa2_qp->rx_vq.err_pkts += 1; 1708 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1709 } else { 1710 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1711 } 1712 1713 num_rx++; 1714 dq_storage++; 1715 } /* End of Packet Rx loop */ 1716 1717 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1718 1719 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx, 1720 dpaa2_qp->rx_vq.err_pkts); 1721 /*Return the total number of packets received to DPAA2 app*/ 1722 return num_rx; 1723 } 1724 1725 /** Release queue pair */ 1726 static int 1727 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1728 { 1729 struct dpaa2_sec_qp *qp = 1730 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1731 1732 PMD_INIT_FUNC_TRACE(); 1733 1734 if (qp->rx_vq.q_storage) { 1735 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1736 rte_free(qp->rx_vq.q_storage); 1737 } 1738 rte_free(qp); 1739 1740 dev->data->queue_pairs[queue_pair_id] = NULL; 1741 1742 return 0; 1743 } 1744 1745 /** Setup a queue pair */ 1746 static int 1747 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1748 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1749 __rte_unused int socket_id) 1750 { 1751 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1752 struct dpaa2_sec_qp *qp; 1753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1754 struct dpseci_rx_queue_cfg cfg; 1755 int32_t retcode; 1756 1757 PMD_INIT_FUNC_TRACE(); 1758 1759 /* If qp is already in use free ring memory and qp metadata. */ 1760 if (dev->data->queue_pairs[qp_id] != NULL) { 1761 DPAA2_SEC_INFO("QP already setup"); 1762 return 0; 1763 } 1764 1765 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1766 dev, qp_id, qp_conf); 1767 1768 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1769 1770 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1771 RTE_CACHE_LINE_SIZE); 1772 if (!qp) { 1773 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1774 return -ENOMEM; 1775 } 1776 1777 qp->rx_vq.crypto_data = dev->data; 1778 qp->tx_vq.crypto_data = dev->data; 1779 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1780 sizeof(struct queue_storage_info_t), 1781 RTE_CACHE_LINE_SIZE); 1782 if (!qp->rx_vq.q_storage) { 1783 DPAA2_SEC_ERR("malloc failed for q_storage"); 1784 return -ENOMEM; 1785 } 1786 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1787 1788 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1789 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1790 return -ENOMEM; 1791 } 1792 1793 dev->data->queue_pairs[qp_id] = qp; 1794 1795 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1796 cfg.user_ctx = (size_t)(&qp->rx_vq); 1797 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1798 qp_id, &cfg); 1799 return retcode; 1800 } 1801 1802 /** Returns the size of the aesni gcm session structure */ 1803 static unsigned int 1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1805 { 1806 PMD_INIT_FUNC_TRACE(); 1807 1808 return sizeof(dpaa2_sec_session); 1809 } 1810 1811 static int 1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1813 struct rte_crypto_sym_xform *xform, 1814 dpaa2_sec_session *session) 1815 { 1816 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1817 struct alginfo cipherdata; 1818 int bufsize, ret = 0; 1819 struct ctxt_priv *priv; 1820 struct sec_flow_context *flc; 1821 1822 PMD_INIT_FUNC_TRACE(); 1823 1824 /* For SEC CIPHER only one descriptor is required. */ 1825 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1826 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1827 RTE_CACHE_LINE_SIZE); 1828 if (priv == NULL) { 1829 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1830 return -ENOMEM; 1831 } 1832 1833 priv->fle_pool = dev_priv->fle_pool; 1834 1835 flc = &priv->flc_desc[0].flc; 1836 1837 session->ctxt_type = DPAA2_SEC_CIPHER; 1838 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1839 RTE_CACHE_LINE_SIZE); 1840 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 1841 DPAA2_SEC_ERR("No Memory for cipher key"); 1842 rte_free(priv); 1843 return -ENOMEM; 1844 } 1845 session->cipher_key.length = xform->cipher.key.length; 1846 1847 memcpy(session->cipher_key.data, xform->cipher.key.data, 1848 xform->cipher.key.length); 1849 cipherdata.key = (size_t)session->cipher_key.data; 1850 cipherdata.keylen = session->cipher_key.length; 1851 cipherdata.key_enc_flags = 0; 1852 cipherdata.key_type = RTA_DATA_IMM; 1853 1854 /* Set IV parameters */ 1855 session->iv.offset = xform->cipher.iv.offset; 1856 session->iv.length = xform->cipher.iv.length; 1857 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1858 DIR_ENC : DIR_DEC; 1859 1860 switch (xform->cipher.algo) { 1861 case RTE_CRYPTO_CIPHER_AES_CBC: 1862 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1863 cipherdata.algmode = OP_ALG_AAI_CBC; 1864 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1865 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1866 SHR_NEVER, &cipherdata, 1867 session->iv.length, 1868 session->dir); 1869 break; 1870 case RTE_CRYPTO_CIPHER_3DES_CBC: 1871 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1872 cipherdata.algmode = OP_ALG_AAI_CBC; 1873 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1874 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1875 SHR_NEVER, &cipherdata, 1876 session->iv.length, 1877 session->dir); 1878 break; 1879 case RTE_CRYPTO_CIPHER_DES_CBC: 1880 cipherdata.algtype = OP_ALG_ALGSEL_DES; 1881 cipherdata.algmode = OP_ALG_AAI_CBC; 1882 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 1883 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1884 SHR_NEVER, &cipherdata, 1885 session->iv.length, 1886 session->dir); 1887 break; 1888 case RTE_CRYPTO_CIPHER_AES_CTR: 1889 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1890 cipherdata.algmode = OP_ALG_AAI_CTR; 1891 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1892 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1893 SHR_NEVER, &cipherdata, 1894 session->iv.length, 1895 session->dir); 1896 break; 1897 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1898 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1899 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1900 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1901 &cipherdata, 1902 session->dir); 1903 break; 1904 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1905 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1906 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1907 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1908 &cipherdata, 1909 session->dir); 1910 break; 1911 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1912 case RTE_CRYPTO_CIPHER_AES_F8: 1913 case RTE_CRYPTO_CIPHER_AES_ECB: 1914 case RTE_CRYPTO_CIPHER_3DES_ECB: 1915 case RTE_CRYPTO_CIPHER_3DES_CTR: 1916 case RTE_CRYPTO_CIPHER_AES_XTS: 1917 case RTE_CRYPTO_CIPHER_ARC4: 1918 case RTE_CRYPTO_CIPHER_NULL: 1919 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1920 xform->cipher.algo); 1921 ret = -ENOTSUP; 1922 goto error_out; 1923 default: 1924 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1925 xform->cipher.algo); 1926 ret = -ENOTSUP; 1927 goto error_out; 1928 } 1929 1930 if (bufsize < 0) { 1931 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1932 ret = -EINVAL; 1933 goto error_out; 1934 } 1935 1936 flc->word1_sdl = (uint8_t)bufsize; 1937 session->ctxt = priv; 1938 1939 #ifdef CAAM_DESC_DEBUG 1940 int i; 1941 for (i = 0; i < bufsize; i++) 1942 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1943 #endif 1944 return ret; 1945 1946 error_out: 1947 rte_free(session->cipher_key.data); 1948 rte_free(priv); 1949 return ret; 1950 } 1951 1952 static int 1953 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1954 struct rte_crypto_sym_xform *xform, 1955 dpaa2_sec_session *session) 1956 { 1957 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1958 struct alginfo authdata; 1959 int bufsize, ret = 0; 1960 struct ctxt_priv *priv; 1961 struct sec_flow_context *flc; 1962 1963 PMD_INIT_FUNC_TRACE(); 1964 1965 /* For SEC AUTH three descriptors are required for various stages */ 1966 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1967 sizeof(struct ctxt_priv) + 3 * 1968 sizeof(struct sec_flc_desc), 1969 RTE_CACHE_LINE_SIZE); 1970 if (priv == NULL) { 1971 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1972 return -ENOMEM; 1973 } 1974 1975 priv->fle_pool = dev_priv->fle_pool; 1976 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1977 1978 session->ctxt_type = DPAA2_SEC_AUTH; 1979 session->auth_key.length = xform->auth.key.length; 1980 if (xform->auth.key.length) { 1981 session->auth_key.data = rte_zmalloc(NULL, 1982 xform->auth.key.length, 1983 RTE_CACHE_LINE_SIZE); 1984 if (session->auth_key.data == NULL) { 1985 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1986 rte_free(priv); 1987 return -ENOMEM; 1988 } 1989 memcpy(session->auth_key.data, xform->auth.key.data, 1990 xform->auth.key.length); 1991 authdata.key = (size_t)session->auth_key.data; 1992 authdata.key_enc_flags = 0; 1993 authdata.key_type = RTA_DATA_IMM; 1994 } 1995 authdata.keylen = session->auth_key.length; 1996 1997 session->digest_length = xform->auth.digest_length; 1998 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1999 DIR_ENC : DIR_DEC; 2000 2001 switch (xform->auth.algo) { 2002 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2003 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2004 authdata.algmode = OP_ALG_AAI_HMAC; 2005 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2006 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2007 1, 0, SHR_NEVER, &authdata, 2008 !session->dir, 2009 session->digest_length); 2010 break; 2011 case RTE_CRYPTO_AUTH_MD5_HMAC: 2012 authdata.algtype = OP_ALG_ALGSEL_MD5; 2013 authdata.algmode = OP_ALG_AAI_HMAC; 2014 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2015 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2016 1, 0, SHR_NEVER, &authdata, 2017 !session->dir, 2018 session->digest_length); 2019 break; 2020 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2021 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2022 authdata.algmode = OP_ALG_AAI_HMAC; 2023 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2024 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2025 1, 0, SHR_NEVER, &authdata, 2026 !session->dir, 2027 session->digest_length); 2028 break; 2029 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2030 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2031 authdata.algmode = OP_ALG_AAI_HMAC; 2032 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2033 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2034 1, 0, SHR_NEVER, &authdata, 2035 !session->dir, 2036 session->digest_length); 2037 break; 2038 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2039 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2040 authdata.algmode = OP_ALG_AAI_HMAC; 2041 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2042 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2043 1, 0, SHR_NEVER, &authdata, 2044 !session->dir, 2045 session->digest_length); 2046 break; 2047 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2048 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2049 authdata.algmode = OP_ALG_AAI_HMAC; 2050 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2051 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2052 1, 0, SHR_NEVER, &authdata, 2053 !session->dir, 2054 session->digest_length); 2055 break; 2056 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2057 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2058 authdata.algmode = OP_ALG_AAI_F9; 2059 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2060 session->iv.offset = xform->auth.iv.offset; 2061 session->iv.length = xform->auth.iv.length; 2062 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2063 1, 0, &authdata, 2064 !session->dir, 2065 session->digest_length); 2066 break; 2067 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2068 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2069 authdata.algmode = OP_ALG_AAI_F9; 2070 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2071 session->iv.offset = xform->auth.iv.offset; 2072 session->iv.length = xform->auth.iv.length; 2073 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2074 1, 0, &authdata, 2075 !session->dir, 2076 session->digest_length); 2077 break; 2078 case RTE_CRYPTO_AUTH_SHA1: 2079 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2080 authdata.algmode = OP_ALG_AAI_HASH; 2081 session->auth_alg = RTE_CRYPTO_AUTH_SHA1; 2082 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2083 1, 0, SHR_NEVER, &authdata, 2084 !session->dir, 2085 session->digest_length); 2086 break; 2087 case RTE_CRYPTO_AUTH_MD5: 2088 authdata.algtype = OP_ALG_ALGSEL_MD5; 2089 authdata.algmode = OP_ALG_AAI_HASH; 2090 session->auth_alg = RTE_CRYPTO_AUTH_MD5; 2091 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2092 1, 0, SHR_NEVER, &authdata, 2093 !session->dir, 2094 session->digest_length); 2095 break; 2096 case RTE_CRYPTO_AUTH_SHA256: 2097 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2098 authdata.algmode = OP_ALG_AAI_HASH; 2099 session->auth_alg = RTE_CRYPTO_AUTH_SHA256; 2100 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2101 1, 0, SHR_NEVER, &authdata, 2102 !session->dir, 2103 session->digest_length); 2104 break; 2105 case RTE_CRYPTO_AUTH_SHA384: 2106 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2107 authdata.algmode = OP_ALG_AAI_HASH; 2108 session->auth_alg = RTE_CRYPTO_AUTH_SHA384; 2109 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2110 1, 0, SHR_NEVER, &authdata, 2111 !session->dir, 2112 session->digest_length); 2113 break; 2114 case RTE_CRYPTO_AUTH_SHA512: 2115 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2116 authdata.algmode = OP_ALG_AAI_HASH; 2117 session->auth_alg = RTE_CRYPTO_AUTH_SHA512; 2118 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2119 1, 0, SHR_NEVER, &authdata, 2120 !session->dir, 2121 session->digest_length); 2122 break; 2123 case RTE_CRYPTO_AUTH_SHA224: 2124 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2125 authdata.algmode = OP_ALG_AAI_HASH; 2126 session->auth_alg = RTE_CRYPTO_AUTH_SHA224; 2127 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc, 2128 1, 0, SHR_NEVER, &authdata, 2129 !session->dir, 2130 session->digest_length); 2131 break; 2132 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2133 authdata.algtype = OP_ALG_ALGSEL_AES; 2134 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2135 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2136 bufsize = cnstr_shdsc_aes_mac( 2137 priv->flc_desc[DESC_INITFINAL].desc, 2138 1, 0, SHR_NEVER, &authdata, 2139 !session->dir, 2140 session->digest_length); 2141 break; 2142 case RTE_CRYPTO_AUTH_AES_CMAC: 2143 authdata.algtype = OP_ALG_ALGSEL_AES; 2144 authdata.algmode = OP_ALG_AAI_CMAC; 2145 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2146 bufsize = cnstr_shdsc_aes_mac( 2147 priv->flc_desc[DESC_INITFINAL].desc, 2148 1, 0, SHR_NEVER, &authdata, 2149 !session->dir, 2150 session->digest_length); 2151 break; 2152 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2153 case RTE_CRYPTO_AUTH_AES_GMAC: 2154 case RTE_CRYPTO_AUTH_KASUMI_F9: 2155 case RTE_CRYPTO_AUTH_NULL: 2156 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 2157 xform->auth.algo); 2158 ret = -ENOTSUP; 2159 goto error_out; 2160 default: 2161 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2162 xform->auth.algo); 2163 ret = -ENOTSUP; 2164 goto error_out; 2165 } 2166 2167 if (bufsize < 0) { 2168 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2169 ret = -EINVAL; 2170 goto error_out; 2171 } 2172 2173 flc->word1_sdl = (uint8_t)bufsize; 2174 session->ctxt = priv; 2175 #ifdef CAAM_DESC_DEBUG 2176 int i; 2177 for (i = 0; i < bufsize; i++) 2178 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2179 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2180 #endif 2181 2182 return ret; 2183 2184 error_out: 2185 rte_free(session->auth_key.data); 2186 rte_free(priv); 2187 return ret; 2188 } 2189 2190 static int 2191 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 2192 struct rte_crypto_sym_xform *xform, 2193 dpaa2_sec_session *session) 2194 { 2195 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2196 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2197 struct alginfo aeaddata; 2198 int bufsize; 2199 struct ctxt_priv *priv; 2200 struct sec_flow_context *flc; 2201 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2202 int err, ret = 0; 2203 2204 PMD_INIT_FUNC_TRACE(); 2205 2206 /* Set IV parameters */ 2207 session->iv.offset = aead_xform->iv.offset; 2208 session->iv.length = aead_xform->iv.length; 2209 session->ctxt_type = DPAA2_SEC_AEAD; 2210 2211 /* For SEC AEAD only one descriptor is required */ 2212 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2213 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2214 RTE_CACHE_LINE_SIZE); 2215 if (priv == NULL) { 2216 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2217 return -ENOMEM; 2218 } 2219 2220 priv->fle_pool = dev_priv->fle_pool; 2221 flc = &priv->flc_desc[0].flc; 2222 2223 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2224 RTE_CACHE_LINE_SIZE); 2225 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2226 DPAA2_SEC_ERR("No Memory for aead key"); 2227 rte_free(priv); 2228 return -ENOMEM; 2229 } 2230 memcpy(session->aead_key.data, aead_xform->key.data, 2231 aead_xform->key.length); 2232 2233 session->digest_length = aead_xform->digest_length; 2234 session->aead_key.length = aead_xform->key.length; 2235 ctxt->auth_only_len = aead_xform->aad_length; 2236 2237 aeaddata.key = (size_t)session->aead_key.data; 2238 aeaddata.keylen = session->aead_key.length; 2239 aeaddata.key_enc_flags = 0; 2240 aeaddata.key_type = RTA_DATA_IMM; 2241 2242 switch (aead_xform->algo) { 2243 case RTE_CRYPTO_AEAD_AES_GCM: 2244 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2245 aeaddata.algmode = OP_ALG_AAI_GCM; 2246 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2247 break; 2248 case RTE_CRYPTO_AEAD_AES_CCM: 2249 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 2250 aead_xform->algo); 2251 ret = -ENOTSUP; 2252 goto error_out; 2253 default: 2254 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2255 aead_xform->algo); 2256 ret = -ENOTSUP; 2257 goto error_out; 2258 } 2259 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2260 DIR_ENC : DIR_DEC; 2261 2262 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2263 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2264 DESC_JOB_IO_LEN, 2265 (unsigned int *)priv->flc_desc[0].desc, 2266 &priv->flc_desc[0].desc[1], 1); 2267 2268 if (err < 0) { 2269 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2270 ret = -EINVAL; 2271 goto error_out; 2272 } 2273 if (priv->flc_desc[0].desc[1] & 1) { 2274 aeaddata.key_type = RTA_DATA_IMM; 2275 } else { 2276 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2277 aeaddata.key_type = RTA_DATA_PTR; 2278 } 2279 priv->flc_desc[0].desc[0] = 0; 2280 priv->flc_desc[0].desc[1] = 0; 2281 2282 if (session->dir == DIR_ENC) 2283 bufsize = cnstr_shdsc_gcm_encap( 2284 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2285 &aeaddata, session->iv.length, 2286 session->digest_length); 2287 else 2288 bufsize = cnstr_shdsc_gcm_decap( 2289 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2290 &aeaddata, session->iv.length, 2291 session->digest_length); 2292 if (bufsize < 0) { 2293 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2294 ret = -EINVAL; 2295 goto error_out; 2296 } 2297 2298 flc->word1_sdl = (uint8_t)bufsize; 2299 session->ctxt = priv; 2300 #ifdef CAAM_DESC_DEBUG 2301 int i; 2302 for (i = 0; i < bufsize; i++) 2303 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2304 i, priv->flc_desc[0].desc[i]); 2305 #endif 2306 return ret; 2307 2308 error_out: 2309 rte_free(session->aead_key.data); 2310 rte_free(priv); 2311 return ret; 2312 } 2313 2314 2315 static int 2316 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2317 struct rte_crypto_sym_xform *xform, 2318 dpaa2_sec_session *session) 2319 { 2320 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2321 struct alginfo authdata, cipherdata; 2322 int bufsize; 2323 struct ctxt_priv *priv; 2324 struct sec_flow_context *flc; 2325 struct rte_crypto_cipher_xform *cipher_xform; 2326 struct rte_crypto_auth_xform *auth_xform; 2327 int err, ret = 0; 2328 2329 PMD_INIT_FUNC_TRACE(); 2330 2331 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2332 cipher_xform = &xform->cipher; 2333 auth_xform = &xform->next->auth; 2334 session->ctxt_type = 2335 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2336 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2337 } else { 2338 cipher_xform = &xform->next->cipher; 2339 auth_xform = &xform->auth; 2340 session->ctxt_type = 2341 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2342 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2343 } 2344 2345 /* Set IV parameters */ 2346 session->iv.offset = cipher_xform->iv.offset; 2347 session->iv.length = cipher_xform->iv.length; 2348 2349 /* For SEC AEAD only one descriptor is required */ 2350 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2351 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2352 RTE_CACHE_LINE_SIZE); 2353 if (priv == NULL) { 2354 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2355 return -ENOMEM; 2356 } 2357 2358 priv->fle_pool = dev_priv->fle_pool; 2359 flc = &priv->flc_desc[0].flc; 2360 2361 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2362 RTE_CACHE_LINE_SIZE); 2363 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2364 DPAA2_SEC_ERR("No Memory for cipher key"); 2365 rte_free(priv); 2366 return -ENOMEM; 2367 } 2368 session->cipher_key.length = cipher_xform->key.length; 2369 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2370 RTE_CACHE_LINE_SIZE); 2371 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2372 DPAA2_SEC_ERR("No Memory for auth key"); 2373 rte_free(session->cipher_key.data); 2374 rte_free(priv); 2375 return -ENOMEM; 2376 } 2377 session->auth_key.length = auth_xform->key.length; 2378 memcpy(session->cipher_key.data, cipher_xform->key.data, 2379 cipher_xform->key.length); 2380 memcpy(session->auth_key.data, auth_xform->key.data, 2381 auth_xform->key.length); 2382 2383 authdata.key = (size_t)session->auth_key.data; 2384 authdata.keylen = session->auth_key.length; 2385 authdata.key_enc_flags = 0; 2386 authdata.key_type = RTA_DATA_IMM; 2387 2388 session->digest_length = auth_xform->digest_length; 2389 2390 switch (auth_xform->algo) { 2391 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2392 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2393 authdata.algmode = OP_ALG_AAI_HMAC; 2394 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2395 break; 2396 case RTE_CRYPTO_AUTH_MD5_HMAC: 2397 authdata.algtype = OP_ALG_ALGSEL_MD5; 2398 authdata.algmode = OP_ALG_AAI_HMAC; 2399 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2400 break; 2401 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2402 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2403 authdata.algmode = OP_ALG_AAI_HMAC; 2404 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2405 break; 2406 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2407 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2408 authdata.algmode = OP_ALG_AAI_HMAC; 2409 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2410 break; 2411 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2412 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2413 authdata.algmode = OP_ALG_AAI_HMAC; 2414 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2415 break; 2416 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2417 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2418 authdata.algmode = OP_ALG_AAI_HMAC; 2419 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2420 break; 2421 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2422 authdata.algtype = OP_ALG_ALGSEL_AES; 2423 authdata.algmode = OP_ALG_AAI_XCBC_MAC; 2424 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC; 2425 break; 2426 case RTE_CRYPTO_AUTH_AES_CMAC: 2427 authdata.algtype = OP_ALG_ALGSEL_AES; 2428 authdata.algmode = OP_ALG_AAI_CMAC; 2429 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 2430 break; 2431 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2432 case RTE_CRYPTO_AUTH_AES_GMAC: 2433 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2434 case RTE_CRYPTO_AUTH_NULL: 2435 case RTE_CRYPTO_AUTH_SHA1: 2436 case RTE_CRYPTO_AUTH_SHA256: 2437 case RTE_CRYPTO_AUTH_SHA512: 2438 case RTE_CRYPTO_AUTH_SHA224: 2439 case RTE_CRYPTO_AUTH_SHA384: 2440 case RTE_CRYPTO_AUTH_MD5: 2441 case RTE_CRYPTO_AUTH_KASUMI_F9: 2442 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2443 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2444 auth_xform->algo); 2445 ret = -ENOTSUP; 2446 goto error_out; 2447 default: 2448 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2449 auth_xform->algo); 2450 ret = -ENOTSUP; 2451 goto error_out; 2452 } 2453 cipherdata.key = (size_t)session->cipher_key.data; 2454 cipherdata.keylen = session->cipher_key.length; 2455 cipherdata.key_enc_flags = 0; 2456 cipherdata.key_type = RTA_DATA_IMM; 2457 2458 switch (cipher_xform->algo) { 2459 case RTE_CRYPTO_CIPHER_AES_CBC: 2460 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2461 cipherdata.algmode = OP_ALG_AAI_CBC; 2462 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2463 break; 2464 case RTE_CRYPTO_CIPHER_3DES_CBC: 2465 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2466 cipherdata.algmode = OP_ALG_AAI_CBC; 2467 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2468 break; 2469 case RTE_CRYPTO_CIPHER_DES_CBC: 2470 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2471 cipherdata.algmode = OP_ALG_AAI_CBC; 2472 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2473 break; 2474 case RTE_CRYPTO_CIPHER_AES_CTR: 2475 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2476 cipherdata.algmode = OP_ALG_AAI_CTR; 2477 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2478 break; 2479 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2480 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2481 case RTE_CRYPTO_CIPHER_NULL: 2482 case RTE_CRYPTO_CIPHER_3DES_ECB: 2483 case RTE_CRYPTO_CIPHER_3DES_CTR: 2484 case RTE_CRYPTO_CIPHER_AES_ECB: 2485 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2486 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2487 cipher_xform->algo); 2488 ret = -ENOTSUP; 2489 goto error_out; 2490 default: 2491 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2492 cipher_xform->algo); 2493 ret = -ENOTSUP; 2494 goto error_out; 2495 } 2496 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2497 DIR_ENC : DIR_DEC; 2498 2499 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2500 priv->flc_desc[0].desc[1] = authdata.keylen; 2501 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2502 DESC_JOB_IO_LEN, 2503 (unsigned int *)priv->flc_desc[0].desc, 2504 &priv->flc_desc[0].desc[2], 2); 2505 2506 if (err < 0) { 2507 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2508 ret = -EINVAL; 2509 goto error_out; 2510 } 2511 if (priv->flc_desc[0].desc[2] & 1) { 2512 cipherdata.key_type = RTA_DATA_IMM; 2513 } else { 2514 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2515 cipherdata.key_type = RTA_DATA_PTR; 2516 } 2517 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2518 authdata.key_type = RTA_DATA_IMM; 2519 } else { 2520 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2521 authdata.key_type = RTA_DATA_PTR; 2522 } 2523 priv->flc_desc[0].desc[0] = 0; 2524 priv->flc_desc[0].desc[1] = 0; 2525 priv->flc_desc[0].desc[2] = 0; 2526 2527 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2528 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2529 0, SHR_SERIAL, 2530 &cipherdata, &authdata, 2531 session->iv.length, 2532 session->digest_length, 2533 session->dir); 2534 if (bufsize < 0) { 2535 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2536 ret = -EINVAL; 2537 goto error_out; 2538 } 2539 } else { 2540 DPAA2_SEC_ERR("Hash before cipher not supported"); 2541 ret = -ENOTSUP; 2542 goto error_out; 2543 } 2544 2545 flc->word1_sdl = (uint8_t)bufsize; 2546 session->ctxt = priv; 2547 #ifdef CAAM_DESC_DEBUG 2548 int i; 2549 for (i = 0; i < bufsize; i++) 2550 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2551 i, priv->flc_desc[0].desc[i]); 2552 #endif 2553 2554 return ret; 2555 2556 error_out: 2557 rte_free(session->cipher_key.data); 2558 rte_free(session->auth_key.data); 2559 rte_free(priv); 2560 return ret; 2561 } 2562 2563 static int 2564 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2565 struct rte_crypto_sym_xform *xform, void *sess) 2566 { 2567 dpaa2_sec_session *session = sess; 2568 int ret; 2569 2570 PMD_INIT_FUNC_TRACE(); 2571 2572 if (unlikely(sess == NULL)) { 2573 DPAA2_SEC_ERR("Invalid session struct"); 2574 return -EINVAL; 2575 } 2576 2577 memset(session, 0, sizeof(dpaa2_sec_session)); 2578 /* Default IV length = 0 */ 2579 session->iv.length = 0; 2580 2581 /* Cipher Only */ 2582 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2583 ret = dpaa2_sec_cipher_init(dev, xform, session); 2584 2585 /* Authentication Only */ 2586 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2587 xform->next == NULL) { 2588 ret = dpaa2_sec_auth_init(dev, xform, session); 2589 2590 /* Cipher then Authenticate */ 2591 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2592 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2593 session->ext_params.aead_ctxt.auth_cipher_text = true; 2594 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2595 ret = dpaa2_sec_auth_init(dev, xform, session); 2596 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2597 ret = dpaa2_sec_cipher_init(dev, xform, session); 2598 else 2599 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2600 /* Authenticate then Cipher */ 2601 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2602 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2603 session->ext_params.aead_ctxt.auth_cipher_text = false; 2604 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2605 ret = dpaa2_sec_cipher_init(dev, xform, session); 2606 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2607 ret = dpaa2_sec_auth_init(dev, xform, session); 2608 else 2609 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2610 /* AEAD operation for AES-GCM kind of Algorithms */ 2611 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2612 xform->next == NULL) { 2613 ret = dpaa2_sec_aead_init(dev, xform, session); 2614 2615 } else { 2616 DPAA2_SEC_ERR("Invalid crypto type"); 2617 return -EINVAL; 2618 } 2619 2620 return ret; 2621 } 2622 2623 #ifdef RTE_LIB_SECURITY 2624 static int 2625 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2626 dpaa2_sec_session *session, 2627 struct alginfo *aeaddata) 2628 { 2629 PMD_INIT_FUNC_TRACE(); 2630 2631 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2632 RTE_CACHE_LINE_SIZE); 2633 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2634 DPAA2_SEC_ERR("No Memory for aead key"); 2635 return -ENOMEM; 2636 } 2637 memcpy(session->aead_key.data, aead_xform->key.data, 2638 aead_xform->key.length); 2639 2640 session->digest_length = aead_xform->digest_length; 2641 session->aead_key.length = aead_xform->key.length; 2642 2643 aeaddata->key = (size_t)session->aead_key.data; 2644 aeaddata->keylen = session->aead_key.length; 2645 aeaddata->key_enc_flags = 0; 2646 aeaddata->key_type = RTA_DATA_IMM; 2647 2648 switch (aead_xform->algo) { 2649 case RTE_CRYPTO_AEAD_AES_GCM: 2650 switch (session->digest_length) { 2651 case 8: 2652 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2653 break; 2654 case 12: 2655 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2656 break; 2657 case 16: 2658 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2659 break; 2660 default: 2661 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2662 session->digest_length); 2663 return -EINVAL; 2664 } 2665 aeaddata->algmode = OP_ALG_AAI_GCM; 2666 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2667 break; 2668 case RTE_CRYPTO_AEAD_AES_CCM: 2669 switch (session->digest_length) { 2670 case 8: 2671 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2672 break; 2673 case 12: 2674 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2675 break; 2676 case 16: 2677 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2678 break; 2679 default: 2680 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2681 session->digest_length); 2682 return -EINVAL; 2683 } 2684 aeaddata->algmode = OP_ALG_AAI_CCM; 2685 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2686 break; 2687 default: 2688 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2689 aead_xform->algo); 2690 return -ENOTSUP; 2691 } 2692 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2693 DIR_ENC : DIR_DEC; 2694 2695 return 0; 2696 } 2697 2698 static int 2699 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2700 struct rte_crypto_auth_xform *auth_xform, 2701 dpaa2_sec_session *session, 2702 struct alginfo *cipherdata, 2703 struct alginfo *authdata) 2704 { 2705 if (cipher_xform) { 2706 session->cipher_key.data = rte_zmalloc(NULL, 2707 cipher_xform->key.length, 2708 RTE_CACHE_LINE_SIZE); 2709 if (session->cipher_key.data == NULL && 2710 cipher_xform->key.length > 0) { 2711 DPAA2_SEC_ERR("No Memory for cipher key"); 2712 return -ENOMEM; 2713 } 2714 2715 session->cipher_key.length = cipher_xform->key.length; 2716 memcpy(session->cipher_key.data, cipher_xform->key.data, 2717 cipher_xform->key.length); 2718 session->cipher_alg = cipher_xform->algo; 2719 } else { 2720 session->cipher_key.data = NULL; 2721 session->cipher_key.length = 0; 2722 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2723 } 2724 2725 if (auth_xform) { 2726 session->auth_key.data = rte_zmalloc(NULL, 2727 auth_xform->key.length, 2728 RTE_CACHE_LINE_SIZE); 2729 if (session->auth_key.data == NULL && 2730 auth_xform->key.length > 0) { 2731 DPAA2_SEC_ERR("No Memory for auth key"); 2732 return -ENOMEM; 2733 } 2734 session->auth_key.length = auth_xform->key.length; 2735 memcpy(session->auth_key.data, auth_xform->key.data, 2736 auth_xform->key.length); 2737 session->auth_alg = auth_xform->algo; 2738 session->digest_length = auth_xform->digest_length; 2739 } else { 2740 session->auth_key.data = NULL; 2741 session->auth_key.length = 0; 2742 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2743 } 2744 2745 authdata->key = (size_t)session->auth_key.data; 2746 authdata->keylen = session->auth_key.length; 2747 authdata->key_enc_flags = 0; 2748 authdata->key_type = RTA_DATA_IMM; 2749 switch (session->auth_alg) { 2750 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2751 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2752 authdata->algmode = OP_ALG_AAI_HMAC; 2753 break; 2754 case RTE_CRYPTO_AUTH_MD5_HMAC: 2755 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2756 authdata->algmode = OP_ALG_AAI_HMAC; 2757 break; 2758 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2759 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2760 authdata->algmode = OP_ALG_AAI_HMAC; 2761 if (session->digest_length != 16) 2762 DPAA2_SEC_WARN( 2763 "+++Using sha256-hmac truncated len is non-standard," 2764 "it will not work with lookaside proto"); 2765 break; 2766 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2767 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2768 authdata->algmode = OP_ALG_AAI_HMAC; 2769 break; 2770 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2771 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2772 authdata->algmode = OP_ALG_AAI_HMAC; 2773 break; 2774 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2775 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96; 2776 authdata->algmode = OP_ALG_AAI_XCBC_MAC; 2777 break; 2778 case RTE_CRYPTO_AUTH_AES_CMAC: 2779 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2780 authdata->algmode = OP_ALG_AAI_CMAC; 2781 break; 2782 case RTE_CRYPTO_AUTH_NULL: 2783 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2784 break; 2785 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2786 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2787 case RTE_CRYPTO_AUTH_SHA1: 2788 case RTE_CRYPTO_AUTH_SHA256: 2789 case RTE_CRYPTO_AUTH_SHA512: 2790 case RTE_CRYPTO_AUTH_SHA224: 2791 case RTE_CRYPTO_AUTH_SHA384: 2792 case RTE_CRYPTO_AUTH_MD5: 2793 case RTE_CRYPTO_AUTH_AES_GMAC: 2794 case RTE_CRYPTO_AUTH_KASUMI_F9: 2795 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2796 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2797 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2798 session->auth_alg); 2799 return -ENOTSUP; 2800 default: 2801 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2802 session->auth_alg); 2803 return -ENOTSUP; 2804 } 2805 cipherdata->key = (size_t)session->cipher_key.data; 2806 cipherdata->keylen = session->cipher_key.length; 2807 cipherdata->key_enc_flags = 0; 2808 cipherdata->key_type = RTA_DATA_IMM; 2809 2810 switch (session->cipher_alg) { 2811 case RTE_CRYPTO_CIPHER_AES_CBC: 2812 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2813 cipherdata->algmode = OP_ALG_AAI_CBC; 2814 break; 2815 case RTE_CRYPTO_CIPHER_3DES_CBC: 2816 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2817 cipherdata->algmode = OP_ALG_AAI_CBC; 2818 break; 2819 case RTE_CRYPTO_CIPHER_DES_CBC: 2820 cipherdata->algtype = OP_PCL_IPSEC_DES; 2821 cipherdata->algmode = OP_ALG_AAI_CBC; 2822 break; 2823 case RTE_CRYPTO_CIPHER_AES_CTR: 2824 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2825 cipherdata->algmode = OP_ALG_AAI_CTR; 2826 break; 2827 case RTE_CRYPTO_CIPHER_NULL: 2828 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2829 break; 2830 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2831 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2832 case RTE_CRYPTO_CIPHER_3DES_ECB: 2833 case RTE_CRYPTO_CIPHER_3DES_CTR: 2834 case RTE_CRYPTO_CIPHER_AES_ECB: 2835 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2836 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2837 session->cipher_alg); 2838 return -ENOTSUP; 2839 default: 2840 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2841 session->cipher_alg); 2842 return -ENOTSUP; 2843 } 2844 2845 return 0; 2846 } 2847 2848 static int 2849 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2850 struct rte_security_session_conf *conf, 2851 void *sess) 2852 { 2853 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2854 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2855 struct rte_crypto_auth_xform *auth_xform = NULL; 2856 struct rte_crypto_aead_xform *aead_xform = NULL; 2857 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2858 struct ctxt_priv *priv; 2859 struct alginfo authdata, cipherdata; 2860 int bufsize; 2861 struct sec_flow_context *flc; 2862 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2863 int ret = -1; 2864 2865 PMD_INIT_FUNC_TRACE(); 2866 2867 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2868 sizeof(struct ctxt_priv) + 2869 sizeof(struct sec_flc_desc), 2870 RTE_CACHE_LINE_SIZE); 2871 2872 if (priv == NULL) { 2873 DPAA2_SEC_ERR("No memory for priv CTXT"); 2874 return -ENOMEM; 2875 } 2876 2877 priv->fle_pool = dev_priv->fle_pool; 2878 flc = &priv->flc_desc[0].flc; 2879 2880 if (ipsec_xform->life.bytes_hard_limit != 0 || 2881 ipsec_xform->life.bytes_soft_limit != 0 || 2882 ipsec_xform->life.packets_hard_limit != 0 || 2883 ipsec_xform->life.packets_soft_limit != 0) 2884 return -ENOTSUP; 2885 2886 memset(session, 0, sizeof(dpaa2_sec_session)); 2887 2888 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2889 cipher_xform = &conf->crypto_xform->cipher; 2890 if (conf->crypto_xform->next) 2891 auth_xform = &conf->crypto_xform->next->auth; 2892 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2893 session, &cipherdata, &authdata); 2894 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2895 auth_xform = &conf->crypto_xform->auth; 2896 if (conf->crypto_xform->next) 2897 cipher_xform = &conf->crypto_xform->next->cipher; 2898 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2899 session, &cipherdata, &authdata); 2900 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2901 aead_xform = &conf->crypto_xform->aead; 2902 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2903 session, &cipherdata); 2904 authdata.keylen = 0; 2905 authdata.algtype = 0; 2906 } else { 2907 DPAA2_SEC_ERR("XFORM not specified"); 2908 ret = -EINVAL; 2909 goto out; 2910 } 2911 if (ret) { 2912 DPAA2_SEC_ERR("Failed to process xform"); 2913 goto out; 2914 } 2915 2916 session->ctxt_type = DPAA2_SEC_IPSEC; 2917 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2918 uint8_t *hdr = NULL; 2919 struct ip ip4_hdr; 2920 struct rte_ipv6_hdr ip6_hdr; 2921 struct ipsec_encap_pdb encap_pdb; 2922 2923 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2924 /* For Sec Proto only one descriptor is required. */ 2925 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2926 2927 /* copy algo specific data to PDB */ 2928 switch (cipherdata.algtype) { 2929 case OP_PCL_IPSEC_AES_CTR: 2930 encap_pdb.ctr.ctr_initial = 0x00000001; 2931 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2932 break; 2933 case OP_PCL_IPSEC_AES_GCM8: 2934 case OP_PCL_IPSEC_AES_GCM12: 2935 case OP_PCL_IPSEC_AES_GCM16: 2936 memcpy(encap_pdb.gcm.salt, 2937 (uint8_t *)&(ipsec_xform->salt), 4); 2938 break; 2939 } 2940 2941 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2942 PDBOPTS_ESP_OIHI_PDB_INL | 2943 PDBOPTS_ESP_IVSRC | 2944 PDBHMO_ESP_SNR; 2945 if (ipsec_xform->options.dec_ttl) 2946 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; 2947 if (ipsec_xform->options.esn) 2948 encap_pdb.options |= PDBOPTS_ESP_ESN; 2949 encap_pdb.spi = ipsec_xform->spi; 2950 session->dir = DIR_ENC; 2951 if (ipsec_xform->tunnel.type == 2952 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2953 encap_pdb.ip_hdr_len = sizeof(struct ip); 2954 ip4_hdr.ip_v = IPVERSION; 2955 ip4_hdr.ip_hl = 5; 2956 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2957 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2958 ip4_hdr.ip_id = 0; 2959 ip4_hdr.ip_off = 0; 2960 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2961 ip4_hdr.ip_p = IPPROTO_ESP; 2962 ip4_hdr.ip_sum = 0; 2963 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2964 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2965 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2966 &ip4_hdr, sizeof(struct ip)); 2967 hdr = (uint8_t *)&ip4_hdr; 2968 } else if (ipsec_xform->tunnel.type == 2969 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2970 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2971 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2972 ((ipsec_xform->tunnel.ipv6.dscp << 2973 RTE_IPV6_HDR_TC_SHIFT) & 2974 RTE_IPV6_HDR_TC_MASK) | 2975 ((ipsec_xform->tunnel.ipv6.flabel << 2976 RTE_IPV6_HDR_FL_SHIFT) & 2977 RTE_IPV6_HDR_FL_MASK)); 2978 /* Payload length will be updated by HW */ 2979 ip6_hdr.payload_len = 0; 2980 ip6_hdr.hop_limits = 2981 ipsec_xform->tunnel.ipv6.hlimit; 2982 ip6_hdr.proto = (ipsec_xform->proto == 2983 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2984 IPPROTO_ESP : IPPROTO_AH; 2985 memcpy(&ip6_hdr.src_addr, 2986 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2987 memcpy(&ip6_hdr.dst_addr, 2988 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2989 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2990 hdr = (uint8_t *)&ip6_hdr; 2991 } 2992 2993 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2994 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 2995 SHR_WAIT : SHR_SERIAL, &encap_pdb, 2996 hdr, &cipherdata, &authdata); 2997 } else if (ipsec_xform->direction == 2998 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2999 struct ipsec_decap_pdb decap_pdb; 3000 3001 flc->dhr = SEC_FLC_DHR_INBOUND; 3002 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 3003 /* copy algo specific data to PDB */ 3004 switch (cipherdata.algtype) { 3005 case OP_PCL_IPSEC_AES_CTR: 3006 decap_pdb.ctr.ctr_initial = 0x00000001; 3007 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 3008 break; 3009 case OP_PCL_IPSEC_AES_GCM8: 3010 case OP_PCL_IPSEC_AES_GCM12: 3011 case OP_PCL_IPSEC_AES_GCM16: 3012 memcpy(decap_pdb.gcm.salt, 3013 (uint8_t *)&(ipsec_xform->salt), 4); 3014 break; 3015 } 3016 3017 decap_pdb.options = (ipsec_xform->tunnel.type == 3018 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 3019 sizeof(struct ip) << 16 : 3020 sizeof(struct rte_ipv6_hdr) << 16; 3021 if (ipsec_xform->options.esn) 3022 decap_pdb.options |= PDBOPTS_ESP_ESN; 3023 3024 if (ipsec_xform->replay_win_sz) { 3025 uint32_t win_sz; 3026 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 3027 3028 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3029 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3030 win_sz = 128; 3031 } 3032 switch (win_sz) { 3033 case 1: 3034 case 2: 3035 case 4: 3036 case 8: 3037 case 16: 3038 case 32: 3039 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3040 break; 3041 case 64: 3042 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3043 break; 3044 case 256: 3045 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3046 break; 3047 case 512: 3048 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3049 break; 3050 case 1024: 3051 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3052 break; 3053 case 128: 3054 default: 3055 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3056 } 3057 } 3058 session->dir = DIR_DEC; 3059 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3060 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3061 SHR_WAIT : SHR_SERIAL, 3062 &decap_pdb, &cipherdata, &authdata); 3063 } else 3064 goto out; 3065 3066 if (bufsize < 0) { 3067 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3068 goto out; 3069 } 3070 3071 flc->word1_sdl = (uint8_t)bufsize; 3072 3073 /* Enable the stashing control bit */ 3074 DPAA2_SET_FLC_RSC(flc); 3075 flc->word2_rflc_31_0 = lower_32_bits( 3076 (size_t)&(((struct dpaa2_sec_qp *) 3077 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3078 flc->word3_rflc_63_32 = upper_32_bits( 3079 (size_t)&(((struct dpaa2_sec_qp *) 3080 dev->data->queue_pairs[0])->rx_vq)); 3081 3082 /* Set EWS bit i.e. enable write-safe */ 3083 DPAA2_SET_FLC_EWS(flc); 3084 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3085 DPAA2_SET_FLC_REUSE_BS(flc); 3086 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3087 DPAA2_SET_FLC_REUSE_FF(flc); 3088 3089 session->ctxt = priv; 3090 3091 return 0; 3092 out: 3093 rte_free(session->auth_key.data); 3094 rte_free(session->cipher_key.data); 3095 rte_free(priv); 3096 return ret; 3097 } 3098 3099 static int 3100 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3101 struct rte_security_session_conf *conf, 3102 void *sess) 3103 { 3104 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3105 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3106 struct rte_crypto_auth_xform *auth_xform = NULL; 3107 struct rte_crypto_cipher_xform *cipher_xform = NULL; 3108 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3109 struct ctxt_priv *priv; 3110 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 3111 struct alginfo authdata, cipherdata; 3112 struct alginfo *p_authdata = NULL; 3113 int bufsize = -1; 3114 struct sec_flow_context *flc; 3115 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3116 int swap = true; 3117 #else 3118 int swap = false; 3119 #endif 3120 3121 PMD_INIT_FUNC_TRACE(); 3122 3123 memset(session, 0, sizeof(dpaa2_sec_session)); 3124 3125 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3126 sizeof(struct ctxt_priv) + 3127 sizeof(struct sec_flc_desc), 3128 RTE_CACHE_LINE_SIZE); 3129 3130 if (priv == NULL) { 3131 DPAA2_SEC_ERR("No memory for priv CTXT"); 3132 return -ENOMEM; 3133 } 3134 3135 priv->fle_pool = dev_priv->fle_pool; 3136 flc = &priv->flc_desc[0].flc; 3137 3138 /* find xfrm types */ 3139 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3140 cipher_xform = &xform->cipher; 3141 if (xform->next != NULL) { 3142 session->ext_params.aead_ctxt.auth_cipher_text = true; 3143 auth_xform = &xform->next->auth; 3144 } 3145 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3146 auth_xform = &xform->auth; 3147 if (xform->next != NULL) { 3148 session->ext_params.aead_ctxt.auth_cipher_text = false; 3149 cipher_xform = &xform->next->cipher; 3150 } 3151 } else { 3152 DPAA2_SEC_ERR("Invalid crypto type"); 3153 return -EINVAL; 3154 } 3155 3156 session->ctxt_type = DPAA2_SEC_PDCP; 3157 if (cipher_xform) { 3158 session->cipher_key.data = rte_zmalloc(NULL, 3159 cipher_xform->key.length, 3160 RTE_CACHE_LINE_SIZE); 3161 if (session->cipher_key.data == NULL && 3162 cipher_xform->key.length > 0) { 3163 DPAA2_SEC_ERR("No Memory for cipher key"); 3164 rte_free(priv); 3165 return -ENOMEM; 3166 } 3167 session->cipher_key.length = cipher_xform->key.length; 3168 memcpy(session->cipher_key.data, cipher_xform->key.data, 3169 cipher_xform->key.length); 3170 session->dir = 3171 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3172 DIR_ENC : DIR_DEC; 3173 session->cipher_alg = cipher_xform->algo; 3174 } else { 3175 session->cipher_key.data = NULL; 3176 session->cipher_key.length = 0; 3177 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3178 session->dir = DIR_ENC; 3179 } 3180 3181 session->pdcp.domain = pdcp_xform->domain; 3182 session->pdcp.bearer = pdcp_xform->bearer; 3183 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3184 session->pdcp.sn_size = pdcp_xform->sn_size; 3185 session->pdcp.hfn = pdcp_xform->hfn; 3186 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3187 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3188 /* hfv ovd offset location is stored in iv.offset value*/ 3189 if (cipher_xform) 3190 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3191 3192 cipherdata.key = (size_t)session->cipher_key.data; 3193 cipherdata.keylen = session->cipher_key.length; 3194 cipherdata.key_enc_flags = 0; 3195 cipherdata.key_type = RTA_DATA_IMM; 3196 3197 switch (session->cipher_alg) { 3198 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3199 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3200 break; 3201 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3202 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3203 break; 3204 case RTE_CRYPTO_CIPHER_AES_CTR: 3205 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3206 break; 3207 case RTE_CRYPTO_CIPHER_NULL: 3208 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3209 break; 3210 default: 3211 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3212 session->cipher_alg); 3213 goto out; 3214 } 3215 3216 if (auth_xform) { 3217 session->auth_key.data = rte_zmalloc(NULL, 3218 auth_xform->key.length, 3219 RTE_CACHE_LINE_SIZE); 3220 if (!session->auth_key.data && 3221 auth_xform->key.length > 0) { 3222 DPAA2_SEC_ERR("No Memory for auth key"); 3223 rte_free(session->cipher_key.data); 3224 rte_free(priv); 3225 return -ENOMEM; 3226 } 3227 session->auth_key.length = auth_xform->key.length; 3228 memcpy(session->auth_key.data, auth_xform->key.data, 3229 auth_xform->key.length); 3230 session->auth_alg = auth_xform->algo; 3231 } else { 3232 session->auth_key.data = NULL; 3233 session->auth_key.length = 0; 3234 session->auth_alg = 0; 3235 } 3236 authdata.key = (size_t)session->auth_key.data; 3237 authdata.keylen = session->auth_key.length; 3238 authdata.key_enc_flags = 0; 3239 authdata.key_type = RTA_DATA_IMM; 3240 3241 if (session->auth_alg) { 3242 switch (session->auth_alg) { 3243 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3244 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3245 break; 3246 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3247 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3248 break; 3249 case RTE_CRYPTO_AUTH_AES_CMAC: 3250 authdata.algtype = PDCP_AUTH_TYPE_AES; 3251 break; 3252 case RTE_CRYPTO_AUTH_NULL: 3253 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3254 break; 3255 default: 3256 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3257 session->auth_alg); 3258 goto out; 3259 } 3260 3261 p_authdata = &authdata; 3262 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3263 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); 3264 goto out; 3265 } 3266 3267 if (pdcp_xform->sdap_enabled) { 3268 int nb_keys_to_inline = 3269 rta_inline_pdcp_sdap_query(authdata.algtype, 3270 cipherdata.algtype, 3271 session->pdcp.sn_size, 3272 session->pdcp.hfn_ovd); 3273 if (nb_keys_to_inline >= 1) { 3274 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3275 cipherdata.key_type = RTA_DATA_PTR; 3276 } 3277 if (nb_keys_to_inline >= 2) { 3278 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 3279 authdata.key_type = RTA_DATA_PTR; 3280 } 3281 } else { 3282 if (rta_inline_pdcp_query(authdata.algtype, 3283 cipherdata.algtype, 3284 session->pdcp.sn_size, 3285 session->pdcp.hfn_ovd)) { 3286 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3287 cipherdata.key_type = RTA_DATA_PTR; 3288 } 3289 } 3290 3291 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3292 if (session->dir == DIR_ENC) 3293 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3294 priv->flc_desc[0].desc, 1, swap, 3295 pdcp_xform->hfn, 3296 session->pdcp.sn_size, 3297 pdcp_xform->bearer, 3298 pdcp_xform->pkt_dir, 3299 pdcp_xform->hfn_threshold, 3300 &cipherdata, &authdata, 3301 0); 3302 else if (session->dir == DIR_DEC) 3303 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3304 priv->flc_desc[0].desc, 1, swap, 3305 pdcp_xform->hfn, 3306 session->pdcp.sn_size, 3307 pdcp_xform->bearer, 3308 pdcp_xform->pkt_dir, 3309 pdcp_xform->hfn_threshold, 3310 &cipherdata, &authdata, 3311 0); 3312 3313 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) { 3314 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc, 3315 1, swap, &authdata); 3316 } else { 3317 if (session->dir == DIR_ENC) { 3318 if (pdcp_xform->sdap_enabled) 3319 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap( 3320 priv->flc_desc[0].desc, 1, swap, 3321 session->pdcp.sn_size, 3322 pdcp_xform->hfn, 3323 pdcp_xform->bearer, 3324 pdcp_xform->pkt_dir, 3325 pdcp_xform->hfn_threshold, 3326 &cipherdata, p_authdata, 0); 3327 else 3328 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3329 priv->flc_desc[0].desc, 1, swap, 3330 session->pdcp.sn_size, 3331 pdcp_xform->hfn, 3332 pdcp_xform->bearer, 3333 pdcp_xform->pkt_dir, 3334 pdcp_xform->hfn_threshold, 3335 &cipherdata, p_authdata, 0); 3336 } else if (session->dir == DIR_DEC) { 3337 if (pdcp_xform->sdap_enabled) 3338 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap( 3339 priv->flc_desc[0].desc, 1, swap, 3340 session->pdcp.sn_size, 3341 pdcp_xform->hfn, 3342 pdcp_xform->bearer, 3343 pdcp_xform->pkt_dir, 3344 pdcp_xform->hfn_threshold, 3345 &cipherdata, p_authdata, 0); 3346 else 3347 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3348 priv->flc_desc[0].desc, 1, swap, 3349 session->pdcp.sn_size, 3350 pdcp_xform->hfn, 3351 pdcp_xform->bearer, 3352 pdcp_xform->pkt_dir, 3353 pdcp_xform->hfn_threshold, 3354 &cipherdata, p_authdata, 0); 3355 } 3356 } 3357 3358 if (bufsize < 0) { 3359 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3360 goto out; 3361 } 3362 3363 /* Enable the stashing control bit */ 3364 DPAA2_SET_FLC_RSC(flc); 3365 flc->word2_rflc_31_0 = lower_32_bits( 3366 (size_t)&(((struct dpaa2_sec_qp *) 3367 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3368 flc->word3_rflc_63_32 = upper_32_bits( 3369 (size_t)&(((struct dpaa2_sec_qp *) 3370 dev->data->queue_pairs[0])->rx_vq)); 3371 3372 flc->word1_sdl = (uint8_t)bufsize; 3373 3374 /* TODO - check the perf impact or 3375 * align as per descriptor type 3376 * Set EWS bit i.e. enable write-safe 3377 * DPAA2_SET_FLC_EWS(flc); 3378 */ 3379 3380 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3381 DPAA2_SET_FLC_REUSE_BS(flc); 3382 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3383 DPAA2_SET_FLC_REUSE_FF(flc); 3384 3385 session->ctxt = priv; 3386 3387 return 0; 3388 out: 3389 rte_free(session->auth_key.data); 3390 rte_free(session->cipher_key.data); 3391 rte_free(priv); 3392 return -EINVAL; 3393 } 3394 3395 static int 3396 dpaa2_sec_security_session_create(void *dev, 3397 struct rte_security_session_conf *conf, 3398 struct rte_security_session *sess, 3399 struct rte_mempool *mempool) 3400 { 3401 void *sess_private_data; 3402 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3403 int ret; 3404 3405 if (rte_mempool_get(mempool, &sess_private_data)) { 3406 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3407 return -ENOMEM; 3408 } 3409 3410 switch (conf->protocol) { 3411 case RTE_SECURITY_PROTOCOL_IPSEC: 3412 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3413 sess_private_data); 3414 break; 3415 case RTE_SECURITY_PROTOCOL_MACSEC: 3416 return -ENOTSUP; 3417 case RTE_SECURITY_PROTOCOL_PDCP: 3418 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3419 sess_private_data); 3420 break; 3421 default: 3422 return -EINVAL; 3423 } 3424 if (ret != 0) { 3425 DPAA2_SEC_ERR("Failed to configure session parameters"); 3426 /* Return session to mempool */ 3427 rte_mempool_put(mempool, sess_private_data); 3428 return ret; 3429 } 3430 3431 set_sec_session_private_data(sess, sess_private_data); 3432 3433 return ret; 3434 } 3435 3436 /** Clear the memory of session so it doesn't leave key material behind */ 3437 static int 3438 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3439 struct rte_security_session *sess) 3440 { 3441 PMD_INIT_FUNC_TRACE(); 3442 void *sess_priv = get_sec_session_private_data(sess); 3443 3444 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3445 3446 if (sess_priv) { 3447 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3448 3449 rte_free(s->ctxt); 3450 rte_free(s->cipher_key.data); 3451 rte_free(s->auth_key.data); 3452 memset(s, 0, sizeof(dpaa2_sec_session)); 3453 set_sec_session_private_data(sess, NULL); 3454 rte_mempool_put(sess_mp, sess_priv); 3455 } 3456 return 0; 3457 } 3458 #endif 3459 static int 3460 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3461 struct rte_crypto_sym_xform *xform, 3462 struct rte_cryptodev_sym_session *sess, 3463 struct rte_mempool *mempool) 3464 { 3465 void *sess_private_data; 3466 int ret; 3467 3468 if (rte_mempool_get(mempool, &sess_private_data)) { 3469 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3470 return -ENOMEM; 3471 } 3472 3473 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3474 if (ret != 0) { 3475 DPAA2_SEC_ERR("Failed to configure session parameters"); 3476 /* Return session to mempool */ 3477 rte_mempool_put(mempool, sess_private_data); 3478 return ret; 3479 } 3480 3481 set_sym_session_private_data(sess, dev->driver_id, 3482 sess_private_data); 3483 3484 return 0; 3485 } 3486 3487 /** Clear the memory of session so it doesn't leave key material behind */ 3488 static void 3489 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3490 struct rte_cryptodev_sym_session *sess) 3491 { 3492 PMD_INIT_FUNC_TRACE(); 3493 uint8_t index = dev->driver_id; 3494 void *sess_priv = get_sym_session_private_data(sess, index); 3495 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3496 3497 if (sess_priv) { 3498 rte_free(s->ctxt); 3499 rte_free(s->cipher_key.data); 3500 rte_free(s->auth_key.data); 3501 memset(s, 0, sizeof(dpaa2_sec_session)); 3502 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3503 set_sym_session_private_data(sess, index, NULL); 3504 rte_mempool_put(sess_mp, sess_priv); 3505 } 3506 } 3507 3508 static int 3509 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3510 struct rte_cryptodev_config *config __rte_unused) 3511 { 3512 PMD_INIT_FUNC_TRACE(); 3513 3514 return 0; 3515 } 3516 3517 static int 3518 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3519 { 3520 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3521 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3522 struct dpseci_attr attr; 3523 struct dpaa2_queue *dpaa2_q; 3524 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3525 dev->data->queue_pairs; 3526 struct dpseci_rx_queue_attr rx_attr; 3527 struct dpseci_tx_queue_attr tx_attr; 3528 int ret, i; 3529 3530 PMD_INIT_FUNC_TRACE(); 3531 3532 memset(&attr, 0, sizeof(struct dpseci_attr)); 3533 3534 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3535 if (ret) { 3536 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3537 priv->hw_id); 3538 goto get_attr_failure; 3539 } 3540 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3541 if (ret) { 3542 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3543 goto get_attr_failure; 3544 } 3545 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3546 dpaa2_q = &qp[i]->rx_vq; 3547 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3548 &rx_attr); 3549 dpaa2_q->fqid = rx_attr.fqid; 3550 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3551 } 3552 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3553 dpaa2_q = &qp[i]->tx_vq; 3554 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3555 &tx_attr); 3556 dpaa2_q->fqid = tx_attr.fqid; 3557 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3558 } 3559 3560 return 0; 3561 get_attr_failure: 3562 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3563 return -1; 3564 } 3565 3566 static void 3567 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3568 { 3569 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3570 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3571 int ret; 3572 3573 PMD_INIT_FUNC_TRACE(); 3574 3575 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3576 if (ret) { 3577 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3578 priv->hw_id); 3579 return; 3580 } 3581 3582 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3583 if (ret < 0) { 3584 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3585 return; 3586 } 3587 } 3588 3589 static int 3590 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused) 3591 { 3592 PMD_INIT_FUNC_TRACE(); 3593 3594 return 0; 3595 } 3596 3597 static void 3598 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3599 struct rte_cryptodev_info *info) 3600 { 3601 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3602 3603 PMD_INIT_FUNC_TRACE(); 3604 if (info != NULL) { 3605 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3606 info->feature_flags = dev->feature_flags; 3607 info->capabilities = dpaa2_sec_capabilities; 3608 /* No limit of number of sessions */ 3609 info->sym.max_nb_sessions = 0; 3610 info->driver_id = cryptodev_driver_id; 3611 } 3612 } 3613 3614 static 3615 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3616 struct rte_cryptodev_stats *stats) 3617 { 3618 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3619 struct fsl_mc_io dpseci; 3620 struct dpseci_sec_counters counters = {0}; 3621 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3622 dev->data->queue_pairs; 3623 int ret, i; 3624 3625 PMD_INIT_FUNC_TRACE(); 3626 if (stats == NULL) { 3627 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3628 return; 3629 } 3630 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3631 if (qp == NULL || qp[i] == NULL) { 3632 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3633 continue; 3634 } 3635 3636 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3637 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3638 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3639 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3640 } 3641 3642 /* In case as secondary process access stats, MCP portal in priv-hw 3643 * may have primary process address. Need the secondary process 3644 * based MCP portal address for this object. 3645 */ 3646 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3647 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3648 &counters); 3649 if (ret) { 3650 DPAA2_SEC_ERR("SEC counters failed"); 3651 } else { 3652 DPAA2_SEC_INFO("dpseci hardware stats:" 3653 "\n\tNum of Requests Dequeued = %" PRIu64 3654 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3655 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3656 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3657 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3658 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3659 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3660 counters.dequeued_requests, 3661 counters.ob_enc_requests, 3662 counters.ib_dec_requests, 3663 counters.ob_enc_bytes, 3664 counters.ob_prot_bytes, 3665 counters.ib_dec_bytes, 3666 counters.ib_valid_bytes); 3667 } 3668 } 3669 3670 static 3671 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3672 { 3673 int i; 3674 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3675 (dev->data->queue_pairs); 3676 3677 PMD_INIT_FUNC_TRACE(); 3678 3679 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3680 if (qp[i] == NULL) { 3681 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3682 continue; 3683 } 3684 qp[i]->tx_vq.rx_pkts = 0; 3685 qp[i]->tx_vq.tx_pkts = 0; 3686 qp[i]->tx_vq.err_pkts = 0; 3687 qp[i]->rx_vq.rx_pkts = 0; 3688 qp[i]->rx_vq.tx_pkts = 0; 3689 qp[i]->rx_vq.err_pkts = 0; 3690 } 3691 } 3692 3693 static void __rte_hot 3694 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3695 const struct qbman_fd *fd, 3696 const struct qbman_result *dq, 3697 struct dpaa2_queue *rxq, 3698 struct rte_event *ev) 3699 { 3700 /* Prefetching mbuf */ 3701 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3702 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3703 3704 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3705 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3706 3707 ev->flow_id = rxq->ev.flow_id; 3708 ev->sub_event_type = rxq->ev.sub_event_type; 3709 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3710 ev->op = RTE_EVENT_OP_NEW; 3711 ev->sched_type = rxq->ev.sched_type; 3712 ev->queue_id = rxq->ev.queue_id; 3713 ev->priority = rxq->ev.priority; 3714 ev->event_ptr = sec_fd_to_mbuf(fd); 3715 3716 qbman_swp_dqrr_consume(swp, dq); 3717 } 3718 static void 3719 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 3720 const struct qbman_fd *fd, 3721 const struct qbman_result *dq, 3722 struct dpaa2_queue *rxq, 3723 struct rte_event *ev) 3724 { 3725 uint8_t dqrr_index; 3726 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3727 /* Prefetching mbuf */ 3728 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3729 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3730 3731 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3732 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3733 3734 ev->flow_id = rxq->ev.flow_id; 3735 ev->sub_event_type = rxq->ev.sub_event_type; 3736 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3737 ev->op = RTE_EVENT_OP_NEW; 3738 ev->sched_type = rxq->ev.sched_type; 3739 ev->queue_id = rxq->ev.queue_id; 3740 ev->priority = rxq->ev.priority; 3741 3742 ev->event_ptr = sec_fd_to_mbuf(fd); 3743 dqrr_index = qbman_get_dqrr_idx(dq); 3744 *dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1; 3745 DPAA2_PER_LCORE_DQRR_SIZE++; 3746 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3747 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3748 } 3749 3750 int 3751 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3752 int qp_id, 3753 struct dpaa2_dpcon_dev *dpcon, 3754 const struct rte_event *event) 3755 { 3756 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3757 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3758 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3759 struct dpseci_rx_queue_cfg cfg; 3760 uint8_t priority; 3761 int ret; 3762 3763 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3764 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3765 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3766 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3767 else 3768 return -EINVAL; 3769 3770 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 3771 (dpcon->num_priorities - 1); 3772 3773 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3774 cfg.options = DPSECI_QUEUE_OPT_DEST; 3775 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3776 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 3777 cfg.dest_cfg.priority = priority; 3778 3779 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3780 cfg.user_ctx = (size_t)(qp); 3781 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3782 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3783 cfg.order_preservation_en = 1; 3784 } 3785 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3786 qp_id, &cfg); 3787 if (ret) { 3788 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3789 return ret; 3790 } 3791 3792 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3793 3794 return 0; 3795 } 3796 3797 int 3798 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3799 int qp_id) 3800 { 3801 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3802 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3803 struct dpseci_rx_queue_cfg cfg; 3804 int ret; 3805 3806 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3807 cfg.options = DPSECI_QUEUE_OPT_DEST; 3808 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3809 3810 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3811 qp_id, &cfg); 3812 if (ret) 3813 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3814 3815 return ret; 3816 } 3817 3818 static struct rte_cryptodev_ops crypto_ops = { 3819 .dev_configure = dpaa2_sec_dev_configure, 3820 .dev_start = dpaa2_sec_dev_start, 3821 .dev_stop = dpaa2_sec_dev_stop, 3822 .dev_close = dpaa2_sec_dev_close, 3823 .dev_infos_get = dpaa2_sec_dev_infos_get, 3824 .stats_get = dpaa2_sec_stats_get, 3825 .stats_reset = dpaa2_sec_stats_reset, 3826 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3827 .queue_pair_release = dpaa2_sec_queue_pair_release, 3828 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3829 .sym_session_configure = dpaa2_sec_sym_session_configure, 3830 .sym_session_clear = dpaa2_sec_sym_session_clear, 3831 /* Raw data-path API related operations */ 3832 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size, 3833 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx, 3834 }; 3835 3836 #ifdef RTE_LIB_SECURITY 3837 static const struct rte_security_capability * 3838 dpaa2_sec_capabilities_get(void *device __rte_unused) 3839 { 3840 return dpaa2_sec_security_cap; 3841 } 3842 3843 static const struct rte_security_ops dpaa2_sec_security_ops = { 3844 .session_create = dpaa2_sec_security_session_create, 3845 .session_update = NULL, 3846 .session_stats_get = NULL, 3847 .session_destroy = dpaa2_sec_security_session_destroy, 3848 .set_pkt_metadata = NULL, 3849 .capabilities_get = dpaa2_sec_capabilities_get 3850 }; 3851 #endif 3852 3853 static int 3854 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3855 { 3856 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3857 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3858 int ret; 3859 3860 PMD_INIT_FUNC_TRACE(); 3861 3862 /* Function is reverse of dpaa2_sec_dev_init. 3863 * It does the following: 3864 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3865 * 2. Close the DPSECI device 3866 * 3. Free the allocated resources. 3867 */ 3868 3869 /*Close the device at underlying layer*/ 3870 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3871 if (ret) { 3872 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3873 return -1; 3874 } 3875 3876 /*Free the allocated memory for ethernet private data and dpseci*/ 3877 priv->hw = NULL; 3878 rte_free(dpseci); 3879 rte_free(dev->security_ctx); 3880 rte_mempool_free(priv->fle_pool); 3881 3882 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3883 dev->data->name, rte_socket_id()); 3884 3885 return 0; 3886 } 3887 3888 static int 3889 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3890 { 3891 struct dpaa2_sec_dev_private *internals; 3892 struct rte_device *dev = cryptodev->device; 3893 struct rte_dpaa2_device *dpaa2_dev; 3894 #ifdef RTE_LIB_SECURITY 3895 struct rte_security_ctx *security_instance; 3896 #endif 3897 struct fsl_mc_io *dpseci; 3898 uint16_t token; 3899 struct dpseci_attr attr; 3900 int retcode, hw_id; 3901 char str[30]; 3902 3903 PMD_INIT_FUNC_TRACE(); 3904 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3905 hw_id = dpaa2_dev->object_id; 3906 3907 cryptodev->driver_id = cryptodev_driver_id; 3908 cryptodev->dev_ops = &crypto_ops; 3909 3910 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 3911 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 3912 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3913 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3914 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3915 RTE_CRYPTODEV_FF_SECURITY | 3916 RTE_CRYPTODEV_FF_SYM_RAW_DP | 3917 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3918 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3919 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3920 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3921 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3922 3923 internals = cryptodev->data->dev_private; 3924 3925 /* 3926 * For secondary processes, we don't initialise any further as primary 3927 * has already done this work. Only check we don't need a different 3928 * RX function 3929 */ 3930 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3931 DPAA2_SEC_DEBUG("Device already init by primary process"); 3932 return 0; 3933 } 3934 #ifdef RTE_LIB_SECURITY 3935 /* Initialize security_ctx only for primary process*/ 3936 security_instance = rte_malloc("rte_security_instances_ops", 3937 sizeof(struct rte_security_ctx), 0); 3938 if (security_instance == NULL) 3939 return -ENOMEM; 3940 security_instance->device = (void *)cryptodev; 3941 security_instance->ops = &dpaa2_sec_security_ops; 3942 security_instance->sess_cnt = 0; 3943 cryptodev->security_ctx = security_instance; 3944 #endif 3945 /*Open the rte device via MC and save the handle for further use*/ 3946 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 3947 sizeof(struct fsl_mc_io), 0); 3948 if (!dpseci) { 3949 DPAA2_SEC_ERR( 3950 "Error in allocating the memory for dpsec object"); 3951 return -ENOMEM; 3952 } 3953 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3954 3955 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 3956 if (retcode != 0) { 3957 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 3958 retcode); 3959 goto init_error; 3960 } 3961 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 3962 if (retcode != 0) { 3963 DPAA2_SEC_ERR( 3964 "Cannot get dpsec device attributed: Error = %x", 3965 retcode); 3966 goto init_error; 3967 } 3968 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 3969 "dpsec-%u", hw_id); 3970 3971 internals->max_nb_queue_pairs = attr.num_tx_queues; 3972 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 3973 internals->hw = dpseci; 3974 internals->token = token; 3975 3976 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d", 3977 getpid(), cryptodev->data->dev_id); 3978 internals->fle_pool = rte_mempool_create((const char *)str, 3979 FLE_POOL_NUM_BUFS, 3980 FLE_POOL_BUF_SIZE, 3981 FLE_POOL_CACHE_SIZE, 0, 3982 NULL, NULL, NULL, NULL, 3983 SOCKET_ID_ANY, 0); 3984 if (!internals->fle_pool) { 3985 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 3986 goto init_error; 3987 } 3988 3989 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3990 return 0; 3991 3992 init_error: 3993 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3994 3995 /* dpaa2_sec_uninit(crypto_dev_name); */ 3996 return -EFAULT; 3997 } 3998 3999 static int 4000 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 4001 struct rte_dpaa2_device *dpaa2_dev) 4002 { 4003 struct rte_cryptodev *cryptodev; 4004 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 4005 4006 int retval; 4007 4008 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 4009 dpaa2_dev->object_id); 4010 4011 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 4012 if (cryptodev == NULL) 4013 return -ENOMEM; 4014 4015 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 4016 cryptodev->data->dev_private = rte_zmalloc_socket( 4017 "cryptodev private structure", 4018 sizeof(struct dpaa2_sec_dev_private), 4019 RTE_CACHE_LINE_SIZE, 4020 rte_socket_id()); 4021 4022 if (cryptodev->data->dev_private == NULL) 4023 rte_panic("Cannot allocate memzone for private " 4024 "device data"); 4025 } 4026 4027 dpaa2_dev->cryptodev = cryptodev; 4028 cryptodev->device = &dpaa2_dev->device; 4029 4030 /* init user callbacks */ 4031 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 4032 4033 if (dpaa2_svr_family == SVR_LX2160A) 4034 rta_set_sec_era(RTA_SEC_ERA_10); 4035 else 4036 rta_set_sec_era(RTA_SEC_ERA_8); 4037 4038 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era()); 4039 4040 /* Invoke PMD device initialization function */ 4041 retval = dpaa2_sec_dev_init(cryptodev); 4042 if (retval == 0) { 4043 rte_cryptodev_pmd_probing_finish(cryptodev); 4044 return 0; 4045 } 4046 4047 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 4048 rte_free(cryptodev->data->dev_private); 4049 4050 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 4051 4052 return -ENXIO; 4053 } 4054 4055 static int 4056 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 4057 { 4058 struct rte_cryptodev *cryptodev; 4059 int ret; 4060 4061 cryptodev = dpaa2_dev->cryptodev; 4062 if (cryptodev == NULL) 4063 return -ENODEV; 4064 4065 ret = dpaa2_sec_uninit(cryptodev); 4066 if (ret) 4067 return ret; 4068 4069 return rte_cryptodev_pmd_destroy(cryptodev); 4070 } 4071 4072 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 4073 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 4074 .drv_type = DPAA2_CRYPTO, 4075 .driver = { 4076 .name = "DPAA2 SEC PMD" 4077 }, 4078 .probe = cryptodev_dpaa2_sec_probe, 4079 .remove = cryptodev_dpaa2_sec_remove, 4080 }; 4081 4082 static struct cryptodev_driver dpaa2_sec_crypto_drv; 4083 4084 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 4085 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 4086 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 4087 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE); 4088