/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2019 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

#ifdef RTE_LIBRTE_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
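
	/* FLE table layout used by the compound-FD builders in this file
	 * (a sketch, derived from the assignments above): fle[0] is driver
	 * bookkeeping only - it stores the crypto op pointer and the session
	 * ctxt so they can be recovered at dequeue time (sec_fd_to_mbuf()
	 * walks back one FLE from the FD address); fle[1] (op_fle) and
	 * fle[2] (ip_fle) are the output and input frame-list entries
	 * presented to SEC; fle[3] onwards hold the scatter/gather entries
	 * those two FLEs point to.
	 */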

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for the last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
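
	/* The output FLE below is given the whole destination buffer
	 * (buf_len rather than data_len), mirroring the SG variant above:
	 * protocol (IPsec/PDCP) processing can legitimately grow the frame,
	 * so SEC is allowed to write past the current data length.
	 */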

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
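
	/* Output length accounting: on encrypt SEC produces the ciphertext
	 * plus the ICV (digest); on decrypt only the plaintext comes back
	 * and the ICV is consumed on the input side instead.
	 */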
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the
	 * FD address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
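	/* Input length accounting: SEC always consumes the IV, the
	 * auth-only data (AAD) and the payload; on decrypt it additionally
	 * reads the ICV in order to verify it.
	 */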
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
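	/* Note: auth_only_len computed above packs the authenticate-only
	 * trailer length into the upper 16 bits and the authenticate-only
	 * header length into the lower 16 bits; the packed word is handed
	 * to SEC through the internal-JD fields of the FLEs/FD below.
	 */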
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
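	/* The first segment may already cover the whole region to
	 * authenticate; otherwise walk the chain and split the remaining
	 * length across the following segments.
	 */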
	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}
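
/* For SNOW 3G (UEA2/UIA2) and ZUC (EEA3/EIA3) the cryptodev API expresses
 * auth/cipher offsets and lengths in bits; the builders in this file only
 * support byte-aligned values and convert them to bytes (>> 3) before
 * programming the FLEs.
 */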

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIBRTE_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -1;

	if (!sess)
		return -1;

	/* If any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
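
	/* Enqueue in bursts of at most dpaa2_eqcr_size frames. On EQCR
	 * back-pressure the partial burst is retried up to
	 * DPAA2_MAX_TX_RETRY_COUNT times before the remaining frames are
	 * given up on (accounted as err_pkts at skip_tx).
	 */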
	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
			op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif
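
/* For the simple (non-compound) FDs built by build_proto_fd(), the crypto
 * op pointer is stashed in mbuf->buf_iova at enqueue time while the real
 * buf_iova is parked in aead.digest.phys_addr; sec_simple_fd_to_mbuf()
 * above undoes that swap. The data offset is then adjusted by the
 * direction-dependent SEC_FLC_DHR_* headroom value.
 */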

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIBRTE_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it */
		DPAA2_SEC_ERR("error: non-inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIBRTE_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the Last Pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}
		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
	__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, return without re-initializing it. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->ctxt_type = DPAA2_SEC_CIPHER;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
1866 DIR_ENC : DIR_DEC; 1867 1868 switch (xform->cipher.algo) { 1869 case RTE_CRYPTO_CIPHER_AES_CBC: 1870 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1871 cipherdata.algmode = OP_ALG_AAI_CBC; 1872 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1873 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1874 SHR_NEVER, &cipherdata, 1875 session->iv.length, 1876 session->dir); 1877 break; 1878 case RTE_CRYPTO_CIPHER_3DES_CBC: 1879 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1880 cipherdata.algmode = OP_ALG_AAI_CBC; 1881 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1882 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1883 SHR_NEVER, &cipherdata, 1884 session->iv.length, 1885 session->dir); 1886 break; 1887 case RTE_CRYPTO_CIPHER_AES_CTR: 1888 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1889 cipherdata.algmode = OP_ALG_AAI_CTR; 1890 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1891 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1892 SHR_NEVER, &cipherdata, 1893 session->iv.length, 1894 session->dir); 1895 break; 1896 case RTE_CRYPTO_CIPHER_3DES_CTR: 1897 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1898 cipherdata.algmode = OP_ALG_AAI_CTR; 1899 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR; 1900 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1901 SHR_NEVER, &cipherdata, 1902 session->iv.length, 1903 session->dir); 1904 break; 1905 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1906 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1907 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1908 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1909 &cipherdata, 1910 session->dir); 1911 break; 1912 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1913 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1914 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1915 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1916 &cipherdata, 1917 session->dir); 1918 break; 1919 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1920 case RTE_CRYPTO_CIPHER_AES_F8: 1921 case RTE_CRYPTO_CIPHER_AES_ECB: 1922 case RTE_CRYPTO_CIPHER_3DES_ECB: 1923 case RTE_CRYPTO_CIPHER_AES_XTS: 1924 case RTE_CRYPTO_CIPHER_ARC4: 1925 case RTE_CRYPTO_CIPHER_NULL: 1926 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1927 xform->cipher.algo); 1928 goto error_out; 1929 default: 1930 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1931 xform->cipher.algo); 1932 goto error_out; 1933 } 1934 1935 if (bufsize < 0) { 1936 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1937 goto error_out; 1938 } 1939 1940 flc->word1_sdl = (uint8_t)bufsize; 1941 session->ctxt = priv; 1942 1943 #ifdef CAAM_DESC_DEBUG 1944 int i; 1945 for (i = 0; i < bufsize; i++) 1946 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1947 #endif 1948 return 0; 1949 1950 error_out: 1951 rte_free(session->cipher_key.data); 1952 rte_free(priv); 1953 return -1; 1954 } 1955 1956 static int 1957 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1958 struct rte_crypto_sym_xform *xform, 1959 dpaa2_sec_session *session) 1960 { 1961 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1962 struct alginfo authdata; 1963 int bufsize; 1964 struct ctxt_priv *priv; 1965 struct sec_flow_context *flc; 1966 1967 PMD_INIT_FUNC_TRACE(); 1968 1969 /* For SEC AUTH three descriptors are required for various stages */ 1970 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1971 sizeof(struct ctxt_priv) + 3 * 1972 sizeof(struct sec_flc_desc), 1973 RTE_CACHE_LINE_SIZE); 1974 if (priv == NULL) { 1975 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1976 return -1; 1977 } 1978 
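/* Note: of the three descriptors allocated above, only the DESC_INITFINAL
 * flow context is built by this function; the other slots are presumably
 * reserved for the split (init/update/final) hash stages mentioned in the
 * allocation comment.
 */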
1979 priv->fle_pool = dev_priv->fle_pool; 1980 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1981 1982 session->ctxt_type = DPAA2_SEC_AUTH; 1983 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1984 RTE_CACHE_LINE_SIZE); 1985 if (session->auth_key.data == NULL) { 1986 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1987 rte_free(priv); 1988 return -1; 1989 } 1990 session->auth_key.length = xform->auth.key.length; 1991 1992 memcpy(session->auth_key.data, xform->auth.key.data, 1993 xform->auth.key.length); 1994 authdata.key = (size_t)session->auth_key.data; 1995 authdata.keylen = session->auth_key.length; 1996 authdata.key_enc_flags = 0; 1997 authdata.key_type = RTA_DATA_IMM; 1998 1999 session->digest_length = xform->auth.digest_length; 2000 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2001 DIR_ENC : DIR_DEC; 2002 2003 switch (xform->auth.algo) { 2004 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2005 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2006 authdata.algmode = OP_ALG_AAI_HMAC; 2007 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2008 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2009 1, 0, SHR_NEVER, &authdata, 2010 !session->dir, 2011 session->digest_length); 2012 break; 2013 case RTE_CRYPTO_AUTH_MD5_HMAC: 2014 authdata.algtype = OP_ALG_ALGSEL_MD5; 2015 authdata.algmode = OP_ALG_AAI_HMAC; 2016 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2017 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2018 1, 0, SHR_NEVER, &authdata, 2019 !session->dir, 2020 session->digest_length); 2021 break; 2022 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2023 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2024 authdata.algmode = OP_ALG_AAI_HMAC; 2025 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2026 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2027 1, 0, SHR_NEVER, &authdata, 2028 !session->dir, 2029 session->digest_length); 2030 break; 2031 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2032 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2033 authdata.algmode = OP_ALG_AAI_HMAC; 2034 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2035 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2036 1, 0, SHR_NEVER, &authdata, 2037 !session->dir, 2038 session->digest_length); 2039 break; 2040 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2041 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2042 authdata.algmode = OP_ALG_AAI_HMAC; 2043 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2044 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2045 1, 0, SHR_NEVER, &authdata, 2046 !session->dir, 2047 session->digest_length); 2048 break; 2049 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2050 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2051 authdata.algmode = OP_ALG_AAI_HMAC; 2052 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2053 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2054 1, 0, SHR_NEVER, &authdata, 2055 !session->dir, 2056 session->digest_length); 2057 break; 2058 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2059 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2060 authdata.algmode = OP_ALG_AAI_F9; 2061 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2062 session->iv.offset = xform->auth.iv.offset; 2063 session->iv.length = xform->auth.iv.length; 2064 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2065 1, 0, &authdata, 2066 !session->dir, 2067 session->digest_length); 2068 break; 2069 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2070 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2071 authdata.algmode = OP_ALG_AAI_F9; 2072 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 
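/* Unlike the HMAC cases above, SNOW3G UIA2 and ZUC EIA3 integrity take an
 * IV from the auth xform, so its offset and length are saved in the
 * session here for per-operation use.
 */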
2073 session->iv.offset = xform->auth.iv.offset;
2074 session->iv.length = xform->auth.iv.length;
2075 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2076 1, 0, &authdata,
2077 !session->dir,
2078 session->digest_length);
2079 break;
2080 case RTE_CRYPTO_AUTH_KASUMI_F9:
2081 case RTE_CRYPTO_AUTH_NULL:
2082 case RTE_CRYPTO_AUTH_SHA1:
2083 case RTE_CRYPTO_AUTH_SHA256:
2084 case RTE_CRYPTO_AUTH_SHA512:
2085 case RTE_CRYPTO_AUTH_SHA224:
2086 case RTE_CRYPTO_AUTH_SHA384:
2087 case RTE_CRYPTO_AUTH_MD5:
2088 case RTE_CRYPTO_AUTH_AES_GMAC:
2089 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2090 case RTE_CRYPTO_AUTH_AES_CMAC:
2091 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2092 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2093 xform->auth.algo);
2094 goto error_out;
2095 default:
2096 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2097 xform->auth.algo);
2098 goto error_out;
2099 }
2100
2101 if (bufsize < 0) {
2102 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2103 goto error_out;
2104 }
2105
2106 flc->word1_sdl = (uint8_t)bufsize;
2107 session->ctxt = priv;
2108 #ifdef CAAM_DESC_DEBUG
2109 int i;
2110 for (i = 0; i < bufsize; i++)
2111 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2112 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2113 #endif
2114
2115 return 0;
2116
2117 error_out:
2118 rte_free(session->auth_key.data);
2119 rte_free(priv);
2120 return -1;
2121 }
2122
2123 static int
2124 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2125 struct rte_crypto_sym_xform *xform,
2126 dpaa2_sec_session *session)
2127 {
2128 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2129 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2130 struct alginfo aeaddata;
2131 int bufsize;
2132 struct ctxt_priv *priv;
2133 struct sec_flow_context *flc;
2134 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2135 int err;
2136
2137 PMD_INIT_FUNC_TRACE();
2138
2139 /* Set IV parameters */
2140 session->iv.offset = aead_xform->iv.offset;
2141 session->iv.length = aead_xform->iv.length;
2142 session->ctxt_type = DPAA2_SEC_AEAD;
2143
2144 /* For SEC AEAD only one descriptor is required */
2145 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2146 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2147 RTE_CACHE_LINE_SIZE);
2148 if (priv == NULL) {
2149 DPAA2_SEC_ERR("No Memory for priv CTXT");
2150 return -1;
2151 }
2152
2153 priv->fle_pool = dev_priv->fle_pool;
2154 flc = &priv->flc_desc[0].flc;
2155
2156 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2157 RTE_CACHE_LINE_SIZE);
2158 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2159 DPAA2_SEC_ERR("No Memory for aead key");
2160 rte_free(priv);
2161 return -1;
2162 }
2163 memcpy(session->aead_key.data, aead_xform->key.data,
2164 aead_xform->key.length);
2165
2166 session->digest_length = aead_xform->digest_length;
2167 session->aead_key.length = aead_xform->key.length;
2168 ctxt->auth_only_len = aead_xform->aad_length;
2169
2170 aeaddata.key = (size_t)session->aead_key.data;
2171 aeaddata.keylen = session->aead_key.length;
2172 aeaddata.key_enc_flags = 0;
2173 aeaddata.key_type = RTA_DATA_IMM;
2174
2175 switch (aead_xform->algo) {
2176 case RTE_CRYPTO_AEAD_AES_GCM:
2177 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2178 aeaddata.algmode = OP_ALG_AAI_GCM;
2179 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2180 break;
2181 case RTE_CRYPTO_AEAD_AES_CCM:
2182 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2183 aead_xform->algo);
2184 goto error_out;
2185 default:
2186 DPAA2_SEC_ERR("Crypto: Undefined AEAD
specified %u", 2187 aead_xform->algo); 2188 goto error_out; 2189 } 2190 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2191 DIR_ENC : DIR_DEC; 2192 2193 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2194 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2195 MIN_JOB_DESC_SIZE, 2196 (unsigned int *)priv->flc_desc[0].desc, 2197 &priv->flc_desc[0].desc[1], 1); 2198 2199 if (err < 0) { 2200 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2201 goto error_out; 2202 } 2203 if (priv->flc_desc[0].desc[1] & 1) { 2204 aeaddata.key_type = RTA_DATA_IMM; 2205 } else { 2206 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2207 aeaddata.key_type = RTA_DATA_PTR; 2208 } 2209 priv->flc_desc[0].desc[0] = 0; 2210 priv->flc_desc[0].desc[1] = 0; 2211 2212 if (session->dir == DIR_ENC) 2213 bufsize = cnstr_shdsc_gcm_encap( 2214 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2215 &aeaddata, session->iv.length, 2216 session->digest_length); 2217 else 2218 bufsize = cnstr_shdsc_gcm_decap( 2219 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2220 &aeaddata, session->iv.length, 2221 session->digest_length); 2222 if (bufsize < 0) { 2223 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2224 goto error_out; 2225 } 2226 2227 flc->word1_sdl = (uint8_t)bufsize; 2228 session->ctxt = priv; 2229 #ifdef CAAM_DESC_DEBUG 2230 int i; 2231 for (i = 0; i < bufsize; i++) 2232 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2233 i, priv->flc_desc[0].desc[i]); 2234 #endif 2235 return 0; 2236 2237 error_out: 2238 rte_free(session->aead_key.data); 2239 rte_free(priv); 2240 return -1; 2241 } 2242 2243 2244 static int 2245 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2246 struct rte_crypto_sym_xform *xform, 2247 dpaa2_sec_session *session) 2248 { 2249 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2250 struct alginfo authdata, cipherdata; 2251 int bufsize; 2252 struct ctxt_priv *priv; 2253 struct sec_flow_context *flc; 2254 struct rte_crypto_cipher_xform *cipher_xform; 2255 struct rte_crypto_auth_xform *auth_xform; 2256 int err; 2257 2258 PMD_INIT_FUNC_TRACE(); 2259 2260 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2261 cipher_xform = &xform->cipher; 2262 auth_xform = &xform->next->auth; 2263 session->ctxt_type = 2264 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2265 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2266 } else { 2267 cipher_xform = &xform->next->cipher; 2268 auth_xform = &xform->auth; 2269 session->ctxt_type = 2270 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2271 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2272 } 2273 2274 /* Set IV parameters */ 2275 session->iv.offset = cipher_xform->iv.offset; 2276 session->iv.length = cipher_xform->iv.length; 2277 2278 /* For SEC AEAD only one descriptor is required */ 2279 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2280 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2281 RTE_CACHE_LINE_SIZE); 2282 if (priv == NULL) { 2283 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2284 return -1; 2285 } 2286 2287 priv->fle_pool = dev_priv->fle_pool; 2288 flc = &priv->flc_desc[0].flc; 2289 2290 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2291 RTE_CACHE_LINE_SIZE); 2292 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2293 DPAA2_SEC_ERR("No Memory for cipher key"); 2294 rte_free(priv); 2295 return -1; 2296 } 2297 session->cipher_key.length = cipher_xform->key.length; 2298 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2299 RTE_CACHE_LINE_SIZE); 2300 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2301 DPAA2_SEC_ERR("No Memory for auth key"); 2302 rte_free(session->cipher_key.data); 2303 rte_free(priv); 2304 return -1; 2305 } 2306 session->auth_key.length = auth_xform->key.length; 2307 memcpy(session->cipher_key.data, cipher_xform->key.data, 2308 cipher_xform->key.length); 2309 memcpy(session->auth_key.data, auth_xform->key.data, 2310 auth_xform->key.length); 2311 2312 authdata.key = (size_t)session->auth_key.data; 2313 authdata.keylen = session->auth_key.length; 2314 authdata.key_enc_flags = 0; 2315 authdata.key_type = RTA_DATA_IMM; 2316 2317 session->digest_length = auth_xform->digest_length; 2318 2319 switch (auth_xform->algo) { 2320 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2321 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2322 authdata.algmode = OP_ALG_AAI_HMAC; 2323 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2324 break; 2325 case RTE_CRYPTO_AUTH_MD5_HMAC: 2326 authdata.algtype = OP_ALG_ALGSEL_MD5; 2327 authdata.algmode = OP_ALG_AAI_HMAC; 2328 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2329 break; 2330 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2331 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2332 authdata.algmode = OP_ALG_AAI_HMAC; 2333 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2334 break; 2335 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2336 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2337 authdata.algmode = OP_ALG_AAI_HMAC; 2338 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2339 break; 2340 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2341 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2342 authdata.algmode = OP_ALG_AAI_HMAC; 2343 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2344 break; 2345 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2346 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2347 authdata.algmode = OP_ALG_AAI_HMAC; 2348 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2349 break; 2350 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2351 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2352 case RTE_CRYPTO_AUTH_NULL: 2353 case RTE_CRYPTO_AUTH_SHA1: 2354 case RTE_CRYPTO_AUTH_SHA256: 2355 case RTE_CRYPTO_AUTH_SHA512: 2356 case RTE_CRYPTO_AUTH_SHA224: 2357 case RTE_CRYPTO_AUTH_SHA384: 2358 case RTE_CRYPTO_AUTH_MD5: 2359 case RTE_CRYPTO_AUTH_AES_GMAC: 2360 case RTE_CRYPTO_AUTH_KASUMI_F9: 2361 case RTE_CRYPTO_AUTH_AES_CMAC: 2362 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2363 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2364 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2365 auth_xform->algo); 2366 goto error_out; 2367 default: 2368 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2369 auth_xform->algo); 
2370 goto error_out; 2371 } 2372 cipherdata.key = (size_t)session->cipher_key.data; 2373 cipherdata.keylen = session->cipher_key.length; 2374 cipherdata.key_enc_flags = 0; 2375 cipherdata.key_type = RTA_DATA_IMM; 2376 2377 switch (cipher_xform->algo) { 2378 case RTE_CRYPTO_CIPHER_AES_CBC: 2379 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2380 cipherdata.algmode = OP_ALG_AAI_CBC; 2381 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2382 break; 2383 case RTE_CRYPTO_CIPHER_3DES_CBC: 2384 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2385 cipherdata.algmode = OP_ALG_AAI_CBC; 2386 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2387 break; 2388 case RTE_CRYPTO_CIPHER_AES_CTR: 2389 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2390 cipherdata.algmode = OP_ALG_AAI_CTR; 2391 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2392 break; 2393 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2394 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2395 case RTE_CRYPTO_CIPHER_NULL: 2396 case RTE_CRYPTO_CIPHER_3DES_ECB: 2397 case RTE_CRYPTO_CIPHER_AES_ECB: 2398 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2399 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2400 cipher_xform->algo); 2401 goto error_out; 2402 default: 2403 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2404 cipher_xform->algo); 2405 goto error_out; 2406 } 2407 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2408 DIR_ENC : DIR_DEC; 2409 2410 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2411 priv->flc_desc[0].desc[1] = authdata.keylen; 2412 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2413 MIN_JOB_DESC_SIZE, 2414 (unsigned int *)priv->flc_desc[0].desc, 2415 &priv->flc_desc[0].desc[2], 2); 2416 2417 if (err < 0) { 2418 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2419 goto error_out; 2420 } 2421 if (priv->flc_desc[0].desc[2] & 1) { 2422 cipherdata.key_type = RTA_DATA_IMM; 2423 } else { 2424 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2425 cipherdata.key_type = RTA_DATA_PTR; 2426 } 2427 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2428 authdata.key_type = RTA_DATA_IMM; 2429 } else { 2430 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2431 authdata.key_type = RTA_DATA_PTR; 2432 } 2433 priv->flc_desc[0].desc[0] = 0; 2434 priv->flc_desc[0].desc[1] = 0; 2435 priv->flc_desc[0].desc[2] = 0; 2436 2437 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2438 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2439 0, SHR_SERIAL, 2440 &cipherdata, &authdata, 2441 session->iv.length, 2442 session->digest_length, 2443 session->dir); 2444 if (bufsize < 0) { 2445 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2446 goto error_out; 2447 } 2448 } else { 2449 DPAA2_SEC_ERR("Hash before cipher not supported"); 2450 goto error_out; 2451 } 2452 2453 flc->word1_sdl = (uint8_t)bufsize; 2454 session->ctxt = priv; 2455 #ifdef CAAM_DESC_DEBUG 2456 int i; 2457 for (i = 0; i < bufsize; i++) 2458 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2459 i, priv->flc_desc[0].desc[i]); 2460 #endif 2461 2462 return 0; 2463 2464 error_out: 2465 rte_free(session->cipher_key.data); 2466 rte_free(session->auth_key.data); 2467 rte_free(priv); 2468 return -1; 2469 } 2470 2471 static int 2472 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2473 struct rte_crypto_sym_xform *xform, void *sess) 2474 { 2475 dpaa2_sec_session *session = sess; 2476 int ret; 2477 2478 PMD_INIT_FUNC_TRACE(); 2479 2480 if (unlikely(sess == NULL)) { 2481 DPAA2_SEC_ERR("Invalid session struct"); 2482 return -1; 2483 } 2484 2485 memset(session, 0, sizeof(dpaa2_sec_session)); 2486 /* Default IV length = 
0 */ 2487 session->iv.length = 0; 2488 2489 /* Cipher Only */ 2490 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2491 ret = dpaa2_sec_cipher_init(dev, xform, session); 2492 2493 /* Authentication Only */ 2494 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2495 xform->next == NULL) { 2496 ret = dpaa2_sec_auth_init(dev, xform, session); 2497 2498 /* Cipher then Authenticate */ 2499 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2500 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2501 session->ext_params.aead_ctxt.auth_cipher_text = true; 2502 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2503 ret = dpaa2_sec_auth_init(dev, xform, session); 2504 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2505 ret = dpaa2_sec_cipher_init(dev, xform, session); 2506 else 2507 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2508 /* Authenticate then Cipher */ 2509 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2510 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2511 session->ext_params.aead_ctxt.auth_cipher_text = false; 2512 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2513 ret = dpaa2_sec_cipher_init(dev, xform, session); 2514 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2515 ret = dpaa2_sec_auth_init(dev, xform, session); 2516 else 2517 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2518 /* AEAD operation for AES-GCM kind of Algorithms */ 2519 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2520 xform->next == NULL) { 2521 ret = dpaa2_sec_aead_init(dev, xform, session); 2522 2523 } else { 2524 DPAA2_SEC_ERR("Invalid crypto type"); 2525 return -EINVAL; 2526 } 2527 2528 return ret; 2529 } 2530 2531 #ifdef RTE_LIBRTE_SECURITY 2532 static int 2533 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2534 dpaa2_sec_session *session, 2535 struct alginfo *aeaddata) 2536 { 2537 PMD_INIT_FUNC_TRACE(); 2538 2539 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2540 RTE_CACHE_LINE_SIZE); 2541 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2542 DPAA2_SEC_ERR("No Memory for aead key"); 2543 return -1; 2544 } 2545 memcpy(session->aead_key.data, aead_xform->key.data, 2546 aead_xform->key.length); 2547 2548 session->digest_length = aead_xform->digest_length; 2549 session->aead_key.length = aead_xform->key.length; 2550 2551 aeaddata->key = (size_t)session->aead_key.data; 2552 aeaddata->keylen = session->aead_key.length; 2553 aeaddata->key_enc_flags = 0; 2554 aeaddata->key_type = RTA_DATA_IMM; 2555 2556 switch (aead_xform->algo) { 2557 case RTE_CRYPTO_AEAD_AES_GCM: 2558 switch (session->digest_length) { 2559 case 8: 2560 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2561 break; 2562 case 12: 2563 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2564 break; 2565 case 16: 2566 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2567 break; 2568 default: 2569 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2570 session->digest_length); 2571 return -1; 2572 } 2573 aeaddata->algmode = OP_ALG_AAI_GCM; 2574 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2575 break; 2576 case RTE_CRYPTO_AEAD_AES_CCM: 2577 switch (session->digest_length) { 2578 case 8: 2579 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2580 break; 2581 case 12: 2582 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2583 break; 2584 case 16: 2585 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2586 break; 2587 default: 2588 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2589 session->digest_length); 2590 return -1; 2591 } 2592 
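/* As in the GCM case above, the IPsec protocol descriptor encodes the
 * truncated ICV length in the algtype itself (CCM8/12/16 for 8-, 12- and
 * 16-byte digests).
 */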
aeaddata->algmode = OP_ALG_AAI_CCM;
2593 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2594 break;
2595 default:
2596 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2597 aead_xform->algo);
2598 return -1;
2599 }
2600 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2601 DIR_ENC : DIR_DEC;
2602
2603 return 0;
2604 }
2605
2606 static int
2607 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2608 struct rte_crypto_auth_xform *auth_xform,
2609 dpaa2_sec_session *session,
2610 struct alginfo *cipherdata,
2611 struct alginfo *authdata)
2612 {
2613 if (cipher_xform) {
2614 session->cipher_key.data = rte_zmalloc(NULL,
2615 cipher_xform->key.length,
2616 RTE_CACHE_LINE_SIZE);
2617 if (session->cipher_key.data == NULL &&
2618 cipher_xform->key.length > 0) {
2619 DPAA2_SEC_ERR("No Memory for cipher key");
2620 return -ENOMEM;
2621 }
2622
2623 session->cipher_key.length = cipher_xform->key.length;
2624 memcpy(session->cipher_key.data, cipher_xform->key.data,
2625 cipher_xform->key.length);
2626 session->cipher_alg = cipher_xform->algo;
2627 } else {
2628 session->cipher_key.data = NULL;
2629 session->cipher_key.length = 0;
2630 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2631 }
2632
2633 if (auth_xform) {
2634 session->auth_key.data = rte_zmalloc(NULL,
2635 auth_xform->key.length,
2636 RTE_CACHE_LINE_SIZE);
2637 if (session->auth_key.data == NULL &&
2638 auth_xform->key.length > 0) {
2639 DPAA2_SEC_ERR("No Memory for auth key");
2640 return -ENOMEM;
2641 }
2642 session->auth_key.length = auth_xform->key.length;
2643 memcpy(session->auth_key.data, auth_xform->key.data,
2644 auth_xform->key.length);
2645 session->auth_alg = auth_xform->algo;
2646 session->digest_length = auth_xform->digest_length;
2647 } else {
2648 session->auth_key.data = NULL;
2649 session->auth_key.length = 0;
2650 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2651 }
2652
2653 authdata->key = (size_t)session->auth_key.data;
2654 authdata->keylen = session->auth_key.length;
2655 authdata->key_enc_flags = 0;
2656 authdata->key_type = RTA_DATA_IMM;
2657 switch (session->auth_alg) {
2658 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2659 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2660 authdata->algmode = OP_ALG_AAI_HMAC;
2661 break;
2662 case RTE_CRYPTO_AUTH_MD5_HMAC:
2663 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2664 authdata->algmode = OP_ALG_AAI_HMAC;
2665 break;
2666 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2667 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2668 authdata->algmode = OP_ALG_AAI_HMAC;
2669 if (session->digest_length != 16)
2670 DPAA2_SEC_WARN(
2671 "+++Using sha256-hmac with a truncated length is non-standard, "
2672 "it will not work with lookaside proto");
2673 break;
2674 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2675 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2676 authdata->algmode = OP_ALG_AAI_HMAC;
2677 break;
2678 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2679 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2680 authdata->algmode = OP_ALG_AAI_HMAC;
2681 break;
2682 case RTE_CRYPTO_AUTH_AES_CMAC:
2683 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2684 break;
2685 case RTE_CRYPTO_AUTH_NULL:
2686 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2687 break;
2688 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2689 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2690 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2691 case RTE_CRYPTO_AUTH_SHA1:
2692 case RTE_CRYPTO_AUTH_SHA256:
2693 case RTE_CRYPTO_AUTH_SHA512:
2694 case RTE_CRYPTO_AUTH_SHA224:
2695 case RTE_CRYPTO_AUTH_SHA384:
2696 case RTE_CRYPTO_AUTH_MD5:
2697 case
RTE_CRYPTO_AUTH_AES_GMAC: 2698 case RTE_CRYPTO_AUTH_KASUMI_F9: 2699 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2700 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2701 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2702 session->auth_alg); 2703 return -1; 2704 default: 2705 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2706 session->auth_alg); 2707 return -1; 2708 } 2709 cipherdata->key = (size_t)session->cipher_key.data; 2710 cipherdata->keylen = session->cipher_key.length; 2711 cipherdata->key_enc_flags = 0; 2712 cipherdata->key_type = RTA_DATA_IMM; 2713 2714 switch (session->cipher_alg) { 2715 case RTE_CRYPTO_CIPHER_AES_CBC: 2716 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2717 cipherdata->algmode = OP_ALG_AAI_CBC; 2718 break; 2719 case RTE_CRYPTO_CIPHER_3DES_CBC: 2720 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2721 cipherdata->algmode = OP_ALG_AAI_CBC; 2722 break; 2723 case RTE_CRYPTO_CIPHER_AES_CTR: 2724 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2725 cipherdata->algmode = OP_ALG_AAI_CTR; 2726 break; 2727 case RTE_CRYPTO_CIPHER_NULL: 2728 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2729 break; 2730 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2731 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2732 case RTE_CRYPTO_CIPHER_3DES_ECB: 2733 case RTE_CRYPTO_CIPHER_AES_ECB: 2734 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2735 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2736 session->cipher_alg); 2737 return -1; 2738 default: 2739 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2740 session->cipher_alg); 2741 return -1; 2742 } 2743 2744 return 0; 2745 } 2746 2747 #ifdef RTE_LIBRTE_SECURITY_TEST 2748 static uint8_t aes_cbc_iv[] = { 2749 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2750 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2751 #endif 2752 2753 static int 2754 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2755 struct rte_security_session_conf *conf, 2756 void *sess) 2757 { 2758 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2759 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2760 struct rte_crypto_auth_xform *auth_xform = NULL; 2761 struct rte_crypto_aead_xform *aead_xform = NULL; 2762 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2763 struct ctxt_priv *priv; 2764 struct alginfo authdata, cipherdata; 2765 int bufsize; 2766 struct sec_flow_context *flc; 2767 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2768 int ret = -1; 2769 2770 PMD_INIT_FUNC_TRACE(); 2771 2772 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2773 sizeof(struct ctxt_priv) + 2774 sizeof(struct sec_flc_desc), 2775 RTE_CACHE_LINE_SIZE); 2776 2777 if (priv == NULL) { 2778 DPAA2_SEC_ERR("No memory for priv CTXT"); 2779 return -ENOMEM; 2780 } 2781 2782 priv->fle_pool = dev_priv->fle_pool; 2783 flc = &priv->flc_desc[0].flc; 2784 2785 memset(session, 0, sizeof(dpaa2_sec_session)); 2786 2787 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2788 cipher_xform = &conf->crypto_xform->cipher; 2789 if (conf->crypto_xform->next) 2790 auth_xform = &conf->crypto_xform->next->auth; 2791 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2792 session, &cipherdata, &authdata); 2793 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2794 auth_xform = &conf->crypto_xform->auth; 2795 if (conf->crypto_xform->next) 2796 cipher_xform = &conf->crypto_xform->next->cipher; 2797 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2798 session, &cipherdata, &authdata); 2799 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2800 aead_xform = 
&conf->crypto_xform->aead; 2801 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2802 session, &cipherdata); 2803 authdata.keylen = 0; 2804 authdata.algtype = 0; 2805 } else { 2806 DPAA2_SEC_ERR("XFORM not specified"); 2807 ret = -EINVAL; 2808 goto out; 2809 } 2810 if (ret) { 2811 DPAA2_SEC_ERR("Failed to process xform"); 2812 goto out; 2813 } 2814 2815 session->ctxt_type = DPAA2_SEC_IPSEC; 2816 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2817 uint8_t *hdr = NULL; 2818 struct ip ip4_hdr; 2819 struct rte_ipv6_hdr ip6_hdr; 2820 struct ipsec_encap_pdb encap_pdb; 2821 2822 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2823 /* For Sec Proto only one descriptor is required. */ 2824 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2825 2826 /* copy algo specific data to PDB */ 2827 switch (cipherdata.algtype) { 2828 case OP_PCL_IPSEC_AES_CTR: 2829 encap_pdb.ctr.ctr_initial = 0x00000001; 2830 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2831 break; 2832 case OP_PCL_IPSEC_AES_GCM8: 2833 case OP_PCL_IPSEC_AES_GCM12: 2834 case OP_PCL_IPSEC_AES_GCM16: 2835 memcpy(encap_pdb.gcm.salt, 2836 (uint8_t *)&(ipsec_xform->salt), 4); 2837 break; 2838 } 2839 2840 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2841 PDBOPTS_ESP_OIHI_PDB_INL | 2842 PDBOPTS_ESP_IVSRC | 2843 PDBHMO_ESP_ENCAP_DTTL | 2844 PDBHMO_ESP_SNR; 2845 if (ipsec_xform->options.esn) 2846 encap_pdb.options |= PDBOPTS_ESP_ESN; 2847 encap_pdb.spi = ipsec_xform->spi; 2848 session->dir = DIR_ENC; 2849 if (ipsec_xform->tunnel.type == 2850 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2851 encap_pdb.ip_hdr_len = sizeof(struct ip); 2852 ip4_hdr.ip_v = IPVERSION; 2853 ip4_hdr.ip_hl = 5; 2854 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2855 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2856 ip4_hdr.ip_id = 0; 2857 ip4_hdr.ip_off = 0; 2858 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2859 ip4_hdr.ip_p = IPPROTO_ESP; 2860 ip4_hdr.ip_sum = 0; 2861 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2862 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2863 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2864 &ip4_hdr, sizeof(struct ip)); 2865 hdr = (uint8_t *)&ip4_hdr; 2866 } else if (ipsec_xform->tunnel.type == 2867 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2868 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2869 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2870 ((ipsec_xform->tunnel.ipv6.dscp << 2871 RTE_IPV6_HDR_TC_SHIFT) & 2872 RTE_IPV6_HDR_TC_MASK) | 2873 ((ipsec_xform->tunnel.ipv6.flabel << 2874 RTE_IPV6_HDR_FL_SHIFT) & 2875 RTE_IPV6_HDR_FL_MASK)); 2876 /* Payload length will be updated by HW */ 2877 ip6_hdr.payload_len = 0; 2878 ip6_hdr.hop_limits = 2879 ipsec_xform->tunnel.ipv6.hlimit; 2880 ip6_hdr.proto = (ipsec_xform->proto == 2881 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2882 IPPROTO_ESP : IPPROTO_AH; 2883 memcpy(&ip6_hdr.src_addr, 2884 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2885 memcpy(&ip6_hdr.dst_addr, 2886 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2887 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2888 hdr = (uint8_t *)&ip6_hdr; 2889 } 2890 2891 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2892 1, 0, SHR_SERIAL, &encap_pdb, 2893 hdr, &cipherdata, &authdata); 2894 } else if (ipsec_xform->direction == 2895 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2896 struct ipsec_decap_pdb decap_pdb; 2897 2898 flc->dhr = SEC_FLC_DHR_INBOUND; 2899 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2900 /* copy algo specific data to PDB */ 2901 switch (cipherdata.algtype) { 2902 case OP_PCL_IPSEC_AES_CTR: 2903 decap_pdb.ctr.ctr_initial = 0x00000001; 2904 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2905 break; 2906 case OP_PCL_IPSEC_AES_GCM8: 2907 case OP_PCL_IPSEC_AES_GCM12: 2908 case OP_PCL_IPSEC_AES_GCM16: 2909 memcpy(decap_pdb.gcm.salt, 2910 (uint8_t *)&(ipsec_xform->salt), 4); 2911 break; 2912 } 2913 2914 decap_pdb.options = (ipsec_xform->tunnel.type == 2915 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 2916 sizeof(struct ip) << 16 : 2917 sizeof(struct rte_ipv6_hdr) << 16; 2918 if (ipsec_xform->options.esn) 2919 decap_pdb.options |= PDBOPTS_ESP_ESN; 2920 2921 if (ipsec_xform->replay_win_sz) { 2922 uint32_t win_sz; 2923 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2924 2925 switch (win_sz) { 2926 case 1: 2927 case 2: 2928 case 4: 2929 case 8: 2930 case 16: 2931 case 32: 2932 decap_pdb.options |= PDBOPTS_ESP_ARS32; 2933 break; 2934 case 64: 2935 decap_pdb.options |= PDBOPTS_ESP_ARS64; 2936 break; 2937 default: 2938 decap_pdb.options |= PDBOPTS_ESP_ARS128; 2939 } 2940 } 2941 session->dir = DIR_DEC; 2942 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2943 1, 0, SHR_SERIAL, 2944 &decap_pdb, &cipherdata, &authdata); 2945 } else 2946 goto out; 2947 2948 if (bufsize < 0) { 2949 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2950 goto out; 2951 } 2952 2953 flc->word1_sdl = (uint8_t)bufsize; 2954 2955 /* Enable the stashing control bit */ 2956 DPAA2_SET_FLC_RSC(flc); 2957 flc->word2_rflc_31_0 = lower_32_bits( 2958 (size_t)&(((struct dpaa2_sec_qp *) 2959 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2960 flc->word3_rflc_63_32 = upper_32_bits( 2961 (size_t)&(((struct dpaa2_sec_qp *) 2962 dev->data->queue_pairs[0])->rx_vq)); 2963 2964 /* Set EWS bit i.e. 
enable write-safe */ 2965 DPAA2_SET_FLC_EWS(flc); 2966 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2967 DPAA2_SET_FLC_REUSE_BS(flc); 2968 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2969 DPAA2_SET_FLC_REUSE_FF(flc); 2970 2971 session->ctxt = priv; 2972 2973 return 0; 2974 out: 2975 rte_free(session->auth_key.data); 2976 rte_free(session->cipher_key.data); 2977 rte_free(priv); 2978 return ret; 2979 } 2980 2981 static int 2982 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2983 struct rte_security_session_conf *conf, 2984 void *sess) 2985 { 2986 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2987 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2988 struct rte_crypto_auth_xform *auth_xform = NULL; 2989 struct rte_crypto_cipher_xform *cipher_xform; 2990 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2991 struct ctxt_priv *priv; 2992 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2993 struct alginfo authdata, cipherdata; 2994 struct alginfo *p_authdata = NULL; 2995 int bufsize = -1; 2996 struct sec_flow_context *flc; 2997 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2998 int swap = true; 2999 #else 3000 int swap = false; 3001 #endif 3002 3003 PMD_INIT_FUNC_TRACE(); 3004 3005 memset(session, 0, sizeof(dpaa2_sec_session)); 3006 3007 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3008 sizeof(struct ctxt_priv) + 3009 sizeof(struct sec_flc_desc), 3010 RTE_CACHE_LINE_SIZE); 3011 3012 if (priv == NULL) { 3013 DPAA2_SEC_ERR("No memory for priv CTXT"); 3014 return -ENOMEM; 3015 } 3016 3017 priv->fle_pool = dev_priv->fle_pool; 3018 flc = &priv->flc_desc[0].flc; 3019 3020 /* find xfrm types */ 3021 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 3022 cipher_xform = &xform->cipher; 3023 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 3024 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3025 session->ext_params.aead_ctxt.auth_cipher_text = true; 3026 cipher_xform = &xform->cipher; 3027 auth_xform = &xform->next->auth; 3028 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 3029 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3030 session->ext_params.aead_ctxt.auth_cipher_text = false; 3031 cipher_xform = &xform->next->cipher; 3032 auth_xform = &xform->auth; 3033 } else { 3034 DPAA2_SEC_ERR("Invalid crypto type"); 3035 return -EINVAL; 3036 } 3037 3038 session->ctxt_type = DPAA2_SEC_PDCP; 3039 if (cipher_xform) { 3040 session->cipher_key.data = rte_zmalloc(NULL, 3041 cipher_xform->key.length, 3042 RTE_CACHE_LINE_SIZE); 3043 if (session->cipher_key.data == NULL && 3044 cipher_xform->key.length > 0) { 3045 DPAA2_SEC_ERR("No Memory for cipher key"); 3046 rte_free(priv); 3047 return -ENOMEM; 3048 } 3049 session->cipher_key.length = cipher_xform->key.length; 3050 memcpy(session->cipher_key.data, cipher_xform->key.data, 3051 cipher_xform->key.length); 3052 session->dir = 3053 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
3054 DIR_ENC : DIR_DEC;
3055 session->cipher_alg = cipher_xform->algo;
3056 } else {
3057 session->cipher_key.data = NULL;
3058 session->cipher_key.length = 0;
3059 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3060 session->dir = DIR_ENC;
3061 }
3062
3063 session->pdcp.domain = pdcp_xform->domain;
3064 session->pdcp.bearer = pdcp_xform->bearer;
3065 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3066 session->pdcp.sn_size = pdcp_xform->sn_size;
3067 session->pdcp.hfn = pdcp_xform->hfn;
3068 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3069 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3070 /* The HFN override offset location is stored in the iv.offset value */
3071 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3072
3073 cipherdata.key = (size_t)session->cipher_key.data;
3074 cipherdata.keylen = session->cipher_key.length;
3075 cipherdata.key_enc_flags = 0;
3076 cipherdata.key_type = RTA_DATA_IMM;
3077
3078 switch (session->cipher_alg) {
3079 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3080 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3081 break;
3082 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3083 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3084 break;
3085 case RTE_CRYPTO_CIPHER_AES_CTR:
3086 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3087 break;
3088 case RTE_CRYPTO_CIPHER_NULL:
3089 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3090 break;
3091 default:
3092 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3093 session->cipher_alg);
3094 goto out;
3095 }
3096
3097 if (auth_xform) {
3098 session->auth_key.data = rte_zmalloc(NULL,
3099 auth_xform->key.length,
3100 RTE_CACHE_LINE_SIZE);
3101 if (!session->auth_key.data &&
3102 auth_xform->key.length > 0) {
3103 DPAA2_SEC_ERR("No Memory for auth key");
3104 rte_free(session->cipher_key.data);
3105 rte_free(priv);
3106 return -ENOMEM;
3107 }
3108 session->auth_key.length = auth_xform->key.length;
3109 memcpy(session->auth_key.data, auth_xform->key.data,
3110 auth_xform->key.length);
3111 session->auth_alg = auth_xform->algo;
3112 } else {
3113 session->auth_key.data = NULL;
3114 session->auth_key.length = 0;
3115 session->auth_alg = 0;
3116 }
3117 authdata.key = (size_t)session->auth_key.data;
3118 authdata.keylen = session->auth_key.length;
3119 authdata.key_enc_flags = 0;
3120 authdata.key_type = RTA_DATA_IMM;
3121
3122 if (session->auth_alg) {
3123 switch (session->auth_alg) {
3124 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3125 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3126 break;
3127 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3128 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3129 break;
3130 case RTE_CRYPTO_AUTH_AES_CMAC:
3131 authdata.algtype = PDCP_AUTH_TYPE_AES;
3132 break;
3133 case RTE_CRYPTO_AUTH_NULL:
3134 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3135 break;
3136 default:
3137 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3138 session->auth_alg);
3139 goto out;
3140 }
3141
3142 p_authdata = &authdata;
3143 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3144 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3145 goto out;
3146 }
3147
3148 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3149 if (session->dir == DIR_ENC)
3150 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3151 priv->flc_desc[0].desc, 1, swap,
3152 pdcp_xform->hfn,
3153 session->pdcp.sn_size,
3154 pdcp_xform->bearer,
3155 pdcp_xform->pkt_dir,
3156 pdcp_xform->hfn_threshold,
3157 &cipherdata, &authdata,
3158 0);
3159 else if (session->dir == DIR_DEC)
3160 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3161 priv->flc_desc[0].desc, 1, swap,
3162 pdcp_xform->hfn,
3163
session->pdcp.sn_size,
3164 pdcp_xform->bearer,
3165 pdcp_xform->pkt_dir,
3166 pdcp_xform->hfn_threshold,
3167 &cipherdata, &authdata,
3168 0);
3169 } else {
3170 if (session->dir == DIR_ENC)
3171 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3172 priv->flc_desc[0].desc, 1, swap,
3173 session->pdcp.sn_size,
3174 pdcp_xform->hfn,
3175 pdcp_xform->bearer,
3176 pdcp_xform->pkt_dir,
3177 pdcp_xform->hfn_threshold,
3178 &cipherdata, p_authdata, 0);
3179 else if (session->dir == DIR_DEC)
3180 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3181 priv->flc_desc[0].desc, 1, swap,
3182 session->pdcp.sn_size,
3183 pdcp_xform->hfn,
3184 pdcp_xform->bearer,
3185 pdcp_xform->pkt_dir,
3186 pdcp_xform->hfn_threshold,
3187 &cipherdata, p_authdata, 0);
3188 }
3189
3190 if (bufsize < 0) {
3191 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3192 goto out;
3193 }
3194
3195 /* Enable the stashing control bit */
3196 DPAA2_SET_FLC_RSC(flc);
3197 flc->word2_rflc_31_0 = lower_32_bits(
3198 (size_t)&(((struct dpaa2_sec_qp *)
3199 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3200 flc->word3_rflc_63_32 = upper_32_bits(
3201 (size_t)&(((struct dpaa2_sec_qp *)
3202 dev->data->queue_pairs[0])->rx_vq));
3203
3204 flc->word1_sdl = (uint8_t)bufsize;
3205
3206 /* TODO - check the perf impact or
3207 * align as per descriptor type
3208 * Set EWS bit i.e. enable write-safe
3209 * DPAA2_SET_FLC_EWS(flc);
3210 */
3211
3212 /* Set BS = 1 i.e reuse input buffers as output buffers */
3213 DPAA2_SET_FLC_REUSE_BS(flc);
3214 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3215 DPAA2_SET_FLC_REUSE_FF(flc);
3216
3217 session->ctxt = priv;
3218
3219 return 0;
3220 out:
3221 rte_free(session->auth_key.data);
3222 rte_free(session->cipher_key.data);
3223 rte_free(priv);
3224 return -1;
3225 }
3226
3227 static int
3228 dpaa2_sec_security_session_create(void *dev,
3229 struct rte_security_session_conf *conf,
3230 struct rte_security_session *sess,
3231 struct rte_mempool *mempool)
3232 {
3233 void *sess_private_data;
3234 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3235 int ret;
3236
3237 if (rte_mempool_get(mempool, &sess_private_data)) {
3238 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3239 return -ENOMEM;
3240 }
3241
3242 switch (conf->protocol) {
3243 case RTE_SECURITY_PROTOCOL_IPSEC:
3244 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3245 sess_private_data);
3246 break;
3247 case RTE_SECURITY_PROTOCOL_MACSEC:
rte_mempool_put(mempool, sess_private_data);
3248 return -ENOTSUP;
3249 case RTE_SECURITY_PROTOCOL_PDCP:
3250 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3251 sess_private_data);
3252 break;
3253 default:
rte_mempool_put(mempool, sess_private_data);
3254 return -EINVAL;
3255 }
3256 if (ret != 0) {
3257 DPAA2_SEC_ERR("Failed to configure session parameters");
3258 /* Return session to mempool */
3259 rte_mempool_put(mempool, sess_private_data);
3260 return ret;
3261 }
3262
3263 set_sec_session_private_data(sess, sess_private_data);
3264
3265 return ret;
3266 }
3267
3268 /** Clear the memory of session so it doesn't leave key material behind */
3269 static int
3270 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3271 struct rte_security_session *sess)
3272 {
3273 PMD_INIT_FUNC_TRACE();
3274 void *sess_priv = get_sec_session_private_data(sess);
3275
3276 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3277
3278 if (sess_priv) {
3279 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3280
3281 rte_free(s->ctxt);
3282 rte_free(s->cipher_key.data);
3283 rte_free(s->auth_key.data);
3284 memset(s, 0, sizeof(dpaa2_sec_session));
3285
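/* Everything sensitive was freed and zeroed above, so the object carries
 * no key material when it goes back to the mempool below.
 */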
set_sec_session_private_data(sess, NULL); 3286 rte_mempool_put(sess_mp, sess_priv); 3287 } 3288 return 0; 3289 } 3290 #endif 3291 static int 3292 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3293 struct rte_crypto_sym_xform *xform, 3294 struct rte_cryptodev_sym_session *sess, 3295 struct rte_mempool *mempool) 3296 { 3297 void *sess_private_data; 3298 int ret; 3299 3300 if (rte_mempool_get(mempool, &sess_private_data)) { 3301 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3302 return -ENOMEM; 3303 } 3304 3305 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3306 if (ret != 0) { 3307 DPAA2_SEC_ERR("Failed to configure session parameters"); 3308 /* Return session to mempool */ 3309 rte_mempool_put(mempool, sess_private_data); 3310 return ret; 3311 } 3312 3313 set_sym_session_private_data(sess, dev->driver_id, 3314 sess_private_data); 3315 3316 return 0; 3317 } 3318 3319 /** Clear the memory of session so it doesn't leave key material behind */ 3320 static void 3321 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3322 struct rte_cryptodev_sym_session *sess) 3323 { 3324 PMD_INIT_FUNC_TRACE(); 3325 uint8_t index = dev->driver_id; 3326 void *sess_priv = get_sym_session_private_data(sess, index); 3327 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3328 3329 if (sess_priv) { 3330 rte_free(s->ctxt); 3331 rte_free(s->cipher_key.data); 3332 rte_free(s->auth_key.data); 3333 memset(s, 0, sizeof(dpaa2_sec_session)); 3334 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3335 set_sym_session_private_data(sess, index, NULL); 3336 rte_mempool_put(sess_mp, sess_priv); 3337 } 3338 } 3339 3340 static int 3341 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3342 struct rte_cryptodev_config *config __rte_unused) 3343 { 3344 PMD_INIT_FUNC_TRACE(); 3345 3346 return 0; 3347 } 3348 3349 static int 3350 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3351 { 3352 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3353 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3354 struct dpseci_attr attr; 3355 struct dpaa2_queue *dpaa2_q; 3356 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3357 dev->data->queue_pairs; 3358 struct dpseci_rx_queue_attr rx_attr; 3359 struct dpseci_tx_queue_attr tx_attr; 3360 int ret, i; 3361 3362 PMD_INIT_FUNC_TRACE(); 3363 3364 memset(&attr, 0, sizeof(struct dpseci_attr)); 3365 3366 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3367 if (ret) { 3368 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3369 priv->hw_id); 3370 goto get_attr_failure; 3371 } 3372 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3373 if (ret) { 3374 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3375 goto get_attr_failure; 3376 } 3377 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3378 dpaa2_q = &qp[i]->rx_vq; 3379 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3380 &rx_attr); 3381 dpaa2_q->fqid = rx_attr.fqid; 3382 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3383 } 3384 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3385 dpaa2_q = &qp[i]->tx_vq; 3386 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3387 &tx_attr); 3388 dpaa2_q->fqid = tx_attr.fqid; 3389 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3390 } 3391 3392 return 0; 3393 get_attr_failure: 3394 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3395 return -1; 3396 } 3397 3398 static void 3399 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3400 { 3401 struct 
dpaa2_sec_dev_private *priv = dev->data->dev_private; 3402 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3403 int ret; 3404 3405 PMD_INIT_FUNC_TRACE(); 3406 3407 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3408 if (ret) { 3409 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3410 priv->hw_id); 3411 return; 3412 } 3413 3414 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3415 if (ret < 0) { 3416 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3417 return; 3418 } 3419 } 3420 3421 static int 3422 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 3423 { 3424 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3425 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3426 int ret; 3427 3428 PMD_INIT_FUNC_TRACE(); 3429 3430 /* Function is reverse of dpaa2_sec_dev_init. 3431 * It does the following: 3432 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3433 * 2. Close the DPSECI device 3434 * 3. Free the allocated resources. 3435 */ 3436 3437 /*Close the device at underlying layer*/ 3438 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3439 if (ret) { 3440 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3441 return -1; 3442 } 3443 3444 /*Free the allocated memory for ethernet private data and dpseci*/ 3445 priv->hw = NULL; 3446 rte_free(dpseci); 3447 3448 return 0; 3449 } 3450 3451 static void 3452 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3453 struct rte_cryptodev_info *info) 3454 { 3455 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3456 3457 PMD_INIT_FUNC_TRACE(); 3458 if (info != NULL) { 3459 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3460 info->feature_flags = dev->feature_flags; 3461 info->capabilities = dpaa2_sec_capabilities; 3462 /* No limit of number of sessions */ 3463 info->sym.max_nb_sessions = 0; 3464 info->driver_id = cryptodev_driver_id; 3465 } 3466 } 3467 3468 static 3469 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3470 struct rte_cryptodev_stats *stats) 3471 { 3472 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3473 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3474 struct dpseci_sec_counters counters = {0}; 3475 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3476 dev->data->queue_pairs; 3477 int ret, i; 3478 3479 PMD_INIT_FUNC_TRACE(); 3480 if (stats == NULL) { 3481 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3482 return; 3483 } 3484 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3485 if (qp[i] == NULL) { 3486 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3487 continue; 3488 } 3489 3490 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3491 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3492 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3493 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3494 } 3495 3496 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 3497 &counters); 3498 if (ret) { 3499 DPAA2_SEC_ERR("SEC counters failed"); 3500 } else { 3501 DPAA2_SEC_INFO("dpseci hardware stats:" 3502 "\n\tNum of Requests Dequeued = %" PRIu64 3503 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3504 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3505 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3506 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3507 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3508 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3509 counters.dequeued_requests, 3510 counters.ob_enc_requests, 3511 counters.ib_dec_requests, 3512 counters.ob_enc_bytes, 3513 
counters.ob_prot_bytes, 3514 counters.ib_dec_bytes, 3515 counters.ib_valid_bytes); 3516 } 3517 } 3518 3519 static 3520 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3521 { 3522 int i; 3523 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3524 (dev->data->queue_pairs); 3525 3526 PMD_INIT_FUNC_TRACE(); 3527 3528 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3529 if (qp[i] == NULL) { 3530 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3531 continue; 3532 } 3533 qp[i]->tx_vq.rx_pkts = 0; 3534 qp[i]->tx_vq.tx_pkts = 0; 3535 qp[i]->tx_vq.err_pkts = 0; 3536 qp[i]->rx_vq.rx_pkts = 0; 3537 qp[i]->rx_vq.tx_pkts = 0; 3538 qp[i]->rx_vq.err_pkts = 0; 3539 } 3540 } 3541 3542 static void __attribute__((hot)) 3543 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3544 const struct qbman_fd *fd, 3545 const struct qbman_result *dq, 3546 struct dpaa2_queue *rxq, 3547 struct rte_event *ev) 3548 { 3549 /* Prefetching mbuf */ 3550 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3551 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3552 3553 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3554 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3555 3556 ev->flow_id = rxq->ev.flow_id; 3557 ev->sub_event_type = rxq->ev.sub_event_type; 3558 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3559 ev->op = RTE_EVENT_OP_NEW; 3560 ev->sched_type = rxq->ev.sched_type; 3561 ev->queue_id = rxq->ev.queue_id; 3562 ev->priority = rxq->ev.priority; 3563 ev->event_ptr = sec_fd_to_mbuf(fd); 3564 3565 qbman_swp_dqrr_consume(swp, dq); 3566 } 3567 static void 3568 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 3569 const struct qbman_fd *fd, 3570 const struct qbman_result *dq, 3571 struct dpaa2_queue *rxq, 3572 struct rte_event *ev) 3573 { 3574 uint8_t dqrr_index; 3575 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3576 /* Prefetching mbuf */ 3577 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3578 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3579 3580 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3581 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3582 3583 ev->flow_id = rxq->ev.flow_id; 3584 ev->sub_event_type = rxq->ev.sub_event_type; 3585 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3586 ev->op = RTE_EVENT_OP_NEW; 3587 ev->sched_type = rxq->ev.sched_type; 3588 ev->queue_id = rxq->ev.queue_id; 3589 ev->priority = rxq->ev.priority; 3590 3591 ev->event_ptr = sec_fd_to_mbuf(fd); 3592 dqrr_index = qbman_get_dqrr_idx(dq); 3593 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3594 DPAA2_PER_LCORE_DQRR_SIZE++; 3595 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3596 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3597 } 3598 3599 int 3600 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3601 int qp_id, 3602 struct dpaa2_dpcon_dev *dpcon, 3603 const struct rte_event *event) 3604 { 3605 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3606 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3607 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3608 struct dpseci_rx_queue_cfg cfg; 3609 uint8_t priority; 3610 int ret; 3611 3612 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3613 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3614 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3615 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3616 else 3617 return -EINVAL; 3618 3619 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 
3620 (dpcon->num_priorities - 1); 3621 3622 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3623 cfg.options = DPSECI_QUEUE_OPT_DEST; 3624 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3625 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 3626 cfg.dest_cfg.priority = priority; 3627 3628 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3629 cfg.user_ctx = (size_t)(qp); 3630 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3631 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3632 cfg.order_preservation_en = 1; 3633 } 3634 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3635 qp_id, &cfg); 3636 if (ret) { 3637 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3638 return ret; 3639 } 3640 3641 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3642 3643 return 0; 3644 } 3645 3646 int 3647 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3648 int qp_id) 3649 { 3650 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3651 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3652 struct dpseci_rx_queue_cfg cfg; 3653 int ret; 3654 3655 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3656 cfg.options = DPSECI_QUEUE_OPT_DEST; 3657 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3658 3659 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3660 qp_id, &cfg); 3661 if (ret) 3662 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3663 3664 return ret; 3665 } 3666 3667 static struct rte_cryptodev_ops crypto_ops = { 3668 .dev_configure = dpaa2_sec_dev_configure, 3669 .dev_start = dpaa2_sec_dev_start, 3670 .dev_stop = dpaa2_sec_dev_stop, 3671 .dev_close = dpaa2_sec_dev_close, 3672 .dev_infos_get = dpaa2_sec_dev_infos_get, 3673 .stats_get = dpaa2_sec_stats_get, 3674 .stats_reset = dpaa2_sec_stats_reset, 3675 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3676 .queue_pair_release = dpaa2_sec_queue_pair_release, 3677 .queue_pair_count = dpaa2_sec_queue_pair_count, 3678 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3679 .sym_session_configure = dpaa2_sec_sym_session_configure, 3680 .sym_session_clear = dpaa2_sec_sym_session_clear, 3681 }; 3682 3683 #ifdef RTE_LIBRTE_SECURITY 3684 static const struct rte_security_capability * 3685 dpaa2_sec_capabilities_get(void *device __rte_unused) 3686 { 3687 return dpaa2_sec_security_cap; 3688 } 3689 3690 static const struct rte_security_ops dpaa2_sec_security_ops = { 3691 .session_create = dpaa2_sec_security_session_create, 3692 .session_update = NULL, 3693 .session_stats_get = NULL, 3694 .session_destroy = dpaa2_sec_security_session_destroy, 3695 .set_pkt_metadata = NULL, 3696 .capabilities_get = dpaa2_sec_capabilities_get 3697 }; 3698 #endif 3699 3700 static int 3701 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3702 { 3703 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3704 3705 rte_free(dev->security_ctx); 3706 3707 rte_mempool_free(internals->fle_pool); 3708 3709 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3710 dev->data->name, rte_socket_id()); 3711 3712 return 0; 3713 } 3714 3715 static int 3716 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3717 { 3718 struct dpaa2_sec_dev_private *internals; 3719 struct rte_device *dev = cryptodev->device; 3720 struct rte_dpaa2_device *dpaa2_dev; 3721 #ifdef RTE_LIBRTE_SECURITY 3722 struct rte_security_ctx *security_instance; 3723 #endif 3724 struct fsl_mc_io *dpseci; 3725 uint16_t token; 3726 struct dpseci_attr attr; 3727 int retcode, hw_id; 3728 char str[30]; 3729 3730 PMD_INIT_FUNC_TRACE(); 3731 dpaa2_dev = 
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.sym_session_get_size = dpaa2_sec_sym_session_get_size,
	.sym_session_configure = dpaa2_sec_sym_session_configure,
	.sym_session_clear = dpaa2_sec_sym_session_clear,
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
#endif

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[30];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		DPAA2_SEC_ERR("DPAA2 SEC device not found");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.  Only check that we don't need
	 * a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already initialized by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for the primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
						sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributes: Error = %x",
			retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	/* Per-process FLE pool; the name embeds pid and dev_id to stay unique */
	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
		 getpid(), cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
						 FLE_POOL_NUM_BUFS,
						 FLE_POOL_BUF_SIZE,
						 FLE_POOL_CACHE_SIZE, 0,
						 NULL, NULL, NULL, NULL,
						 SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}
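/*
 * Illustrative sketch only (kept out of the build): once this PMD has been
 * probed, an application drives it through the generic cryptodev API, which
 * dispatches to the handlers registered in crypto_ops above.  The dev_id,
 * socket_id and nb_qps values are hypothetical; session-mempool fields of
 * rte_cryptodev_qp_conf are omitted here as they vary across DPDK releases.
 */
#if 0
static int
example_setup_dpaa2_sec(uint8_t dev_id, int socket_id, uint16_t nb_qps)
{
	struct rte_cryptodev_config conf = {
		.socket_id = socket_id,
		.nb_queue_pairs = nb_qps,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
	};
	uint16_t i;
	int ret;

	/* Ends up in dpaa2_sec_dev_configure */
	ret = rte_cryptodev_configure(dev_id, &conf);
	if (ret)
		return ret;

	/* Ends up in dpaa2_sec_queue_pair_setup, once per queue pair */
	for (i = 0; i < nb_qps; i++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, i, &qp_conf,
						     socket_id);
		if (ret)
			return ret;
	}

	/* Ends up in dpaa2_sec_dev_start */
	return rte_cryptodev_start(dev_id);
}
#endif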
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	if (dpaa2_svr_family == SVR_LX2160A)
		rta_set_sec_era(RTA_SEC_ERA_10);

	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);

RTE_INIT(dpaa2_sec_init_log)
{
	/* Crypto PMD logs */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}
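/*
 * Usage note (assumption, not part of the driver): the "pmd.crypto.dpaa2"
 * log type registered above defaults to NOTICE; it can typically be raised
 * at runtime through the EAL command line (8 == RTE_LOG_DEBUG), e.g.:
 *
 *   --log-level=pmd.crypto.dpaa2,8
 */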