/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

#ifdef RTE_LIBRTE_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
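	/* FLE table layout for SG compound FDs in this driver: fle[0] is a
	 * bookkeeping entry holding the crypto op pointer and the session
	 * context (never seen by SEC), fle[1] is the output frame-list
	 * entry, fle[2] the input frame-list entry, and fle[3] onwards the
	 * scatter/gather entries the two FLEs point into.
	 */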
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
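	/* Note: the output FLE is given the full buffer length rather than
	 * pkt_len, so protocol encap (IPsec/PDCP) has room to grow the
	 * frame in place.
	 */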
	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
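	/* A fixed-size pool entry (FLE_POOL_BUF_SIZE bytes) is enough for
	 * the contiguous case: one bookkeeping FLE, two frame-list entries,
	 * a few SGEs and scratch space for the old ICV.
	 */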
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
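	/* The output covers only the cipher range (plus the ICV when
	 * encrypting); auth-only header/tail bytes are not written back.
	 */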
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf. Currently we
	 * do not know which FLE has the mbuf stored, so while retrieving we
	 * go back one FLE from the FD ADDR to get the mbuf address from the
	 * previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
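	/* Either the whole auth range fits in the first segment, or walk
	 * the remaining segments until data_len is exhausted.
	 */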
	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we go back one FLE from the FD ADDR to get the mbuf
	 * address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
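	/* For SNOW3G/ZUC the cryptodev API expresses offset and length in
	 * bits; SEC expects bytes, hence the >>3 above (full-byte alignment
	 * is enforced first).
	 */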
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we go back one FLE from the FD ADDR to get the mbuf
	 * address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIBRTE_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -1;

	if (!sess)
		return -1;
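	/* Dispatch: segmented mbufs take the SG builders (FLE table from
	 * rte_malloc), contiguous ones the fixed-size FLE-pool builders.
	 */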
	/* Any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
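	/* Enqueue in bursts of at most the EQCR ring size; if the ring
	 * stays full for DPAA2_MAX_TX_RETRY_COUNT attempts, give up and
	 * account the remaining ops as errors.
	 */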
	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
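	/* Undo the stash from build_proto_fd(): buf_iova temporarily held
	 * the op pointer while the real IOVA was parked in
	 * aead.digest.phys_addr.
	 */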
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIBRTE_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf. Currently we
	 * do not know which FLE has the mbuf stored, so while retrieving we
	 * go back one FLE from the FD ADDR to get the mbuf address from the
	 * previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIBRTE_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();
	/* If qp is already set up, there is nothing more to do */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Returns the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->ctxt_type = DPAA2_SEC_CIPHER;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
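	/* The shared descriptor is built once per session by the RTA
	 * cnstr_shdsc_* helpers below and stored in the flow context (FLC);
	 * per-packet FDs only reference it.
	 */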
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
					      &cipherdata,
					      session->dir);
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
					   &cipherdata,
					   session->dir);
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;

#ifdef CAAM_DESC_DEBUG
	int i;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
#endif
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}
1970 priv->fle_pool = dev_priv->fle_pool; 1971 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1972 1973 session->ctxt_type = DPAA2_SEC_AUTH; 1974 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1975 RTE_CACHE_LINE_SIZE); 1976 if (session->auth_key.data == NULL) { 1977 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1978 rte_free(priv); 1979 return -1; 1980 } 1981 session->auth_key.length = xform->auth.key.length; 1982 1983 memcpy(session->auth_key.data, xform->auth.key.data, 1984 xform->auth.key.length); 1985 authdata.key = (size_t)session->auth_key.data; 1986 authdata.keylen = session->auth_key.length; 1987 authdata.key_enc_flags = 0; 1988 authdata.key_type = RTA_DATA_IMM; 1989 1990 session->digest_length = xform->auth.digest_length; 1991 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1992 DIR_ENC : DIR_DEC; 1993 1994 switch (xform->auth.algo) { 1995 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1996 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1997 authdata.algmode = OP_ALG_AAI_HMAC; 1998 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1999 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2000 1, 0, SHR_NEVER, &authdata, 2001 !session->dir, 2002 session->digest_length); 2003 break; 2004 case RTE_CRYPTO_AUTH_MD5_HMAC: 2005 authdata.algtype = OP_ALG_ALGSEL_MD5; 2006 authdata.algmode = OP_ALG_AAI_HMAC; 2007 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2008 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2009 1, 0, SHR_NEVER, &authdata, 2010 !session->dir, 2011 session->digest_length); 2012 break; 2013 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2014 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2015 authdata.algmode = OP_ALG_AAI_HMAC; 2016 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2017 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2018 1, 0, SHR_NEVER, &authdata, 2019 !session->dir, 2020 session->digest_length); 2021 break; 2022 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2023 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2024 authdata.algmode = OP_ALG_AAI_HMAC; 2025 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2026 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2027 1, 0, SHR_NEVER, &authdata, 2028 !session->dir, 2029 session->digest_length); 2030 break; 2031 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2032 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2033 authdata.algmode = OP_ALG_AAI_HMAC; 2034 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2035 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2036 1, 0, SHR_NEVER, &authdata, 2037 !session->dir, 2038 session->digest_length); 2039 break; 2040 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2041 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2042 authdata.algmode = OP_ALG_AAI_HMAC; 2043 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2044 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2045 1, 0, SHR_NEVER, &authdata, 2046 !session->dir, 2047 session->digest_length); 2048 break; 2049 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2050 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2051 authdata.algmode = OP_ALG_AAI_F9; 2052 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2053 session->iv.offset = xform->auth.iv.offset; 2054 session->iv.length = xform->auth.iv.length; 2055 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2056 1, 0, &authdata, 2057 !session->dir, 2058 session->digest_length); 2059 break; 2060 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2061 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2062 authdata.algmode = OP_ALG_AAI_F9; 2063 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 
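/*
 * For the SNOW 3G UIA2 and ZUC EIA3 cases the per-packet IV material is
 * supplied through the auth transform's IV fields, which is why
 * session->iv is captured here rather than from a cipher transform.
 */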
2064		session->iv.offset = xform->auth.iv.offset;
2065		session->iv.length = xform->auth.iv.length;
2066		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2067					1, 0, &authdata,
2068					!session->dir,
2069					session->digest_length);
2070		break;
2071	case RTE_CRYPTO_AUTH_KASUMI_F9:
2072	case RTE_CRYPTO_AUTH_NULL:
2073	case RTE_CRYPTO_AUTH_SHA1:
2074	case RTE_CRYPTO_AUTH_SHA256:
2075	case RTE_CRYPTO_AUTH_SHA512:
2076	case RTE_CRYPTO_AUTH_SHA224:
2077	case RTE_CRYPTO_AUTH_SHA384:
2078	case RTE_CRYPTO_AUTH_MD5:
2079	case RTE_CRYPTO_AUTH_AES_GMAC:
2080	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2081	case RTE_CRYPTO_AUTH_AES_CMAC:
2082	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2083		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2084			      xform->auth.algo);
2085		goto error_out;
2086	default:
2087		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2088			      xform->auth.algo);
2089		goto error_out;
2090	}
2091
2092	if (bufsize < 0) {
2093		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2094		goto error_out;
2095	}
2096
2097	flc->word1_sdl = (uint8_t)bufsize;
2098	session->ctxt = priv;
2099	#ifdef CAAM_DESC_DEBUG
2100	int i;
2101	for (i = 0; i < bufsize; i++)
2102		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2103				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2104	#endif
2105
2106	return 0;
2107
2108	error_out:
2109	rte_free(session->auth_key.data);
2110	rte_free(priv);
2111	return -1;
2112	}
2113
2114	static int
2115	dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2116			struct rte_crypto_sym_xform *xform,
2117			dpaa2_sec_session *session)
2118	{
2119		struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2120		struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2121		struct alginfo aeaddata;
2122		int bufsize;
2123		struct ctxt_priv *priv;
2124		struct sec_flow_context *flc;
2125		struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2126		int err;
2127
2128		PMD_INIT_FUNC_TRACE();
2129
2130		/* Set IV parameters */
2131		session->iv.offset = aead_xform->iv.offset;
2132		session->iv.length = aead_xform->iv.length;
2133		session->ctxt_type = DPAA2_SEC_AEAD;
2134
2135		/* For SEC AEAD only one descriptor is required */
2136		priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2137				sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2138				RTE_CACHE_LINE_SIZE);
2139		if (priv == NULL) {
2140			DPAA2_SEC_ERR("No Memory for priv CTXT");
2141			return -1;
2142		}
2143
2144		priv->fle_pool = dev_priv->fle_pool;
2145		flc = &priv->flc_desc[0].flc;
2146
2147		session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2148				RTE_CACHE_LINE_SIZE);
2149		if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2150			DPAA2_SEC_ERR("No Memory for aead key");
2151			rte_free(priv);
2152			return -1;
2153		}
2154		memcpy(session->aead_key.data, aead_xform->key.data,
2155		       aead_xform->key.length);
2156
2157		session->digest_length = aead_xform->digest_length;
2158		session->aead_key.length = aead_xform->key.length;
2159		ctxt->auth_only_len = aead_xform->aad_length;
2160
2161		aeaddata.key = (size_t)session->aead_key.data;
2162		aeaddata.keylen = session->aead_key.length;
2163		aeaddata.key_enc_flags = 0;
2164		aeaddata.key_type = RTA_DATA_IMM;
2165
2166		switch (aead_xform->algo) {
2167		case RTE_CRYPTO_AEAD_AES_GCM:
2168			aeaddata.algtype = OP_ALG_ALGSEL_AES;
2169			aeaddata.algmode = OP_ALG_AAI_GCM;
2170			session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2171			break;
2172		case RTE_CRYPTO_AEAD_AES_CCM:
2173			DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2174				      aead_xform->algo);
2175			goto error_out;
2176		default:
2177			DPAA2_SEC_ERR("Crypto: Undefined AEAD
specified %u", 2178 aead_xform->algo); 2179 goto error_out; 2180 } 2181 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2182 DIR_ENC : DIR_DEC; 2183 2184 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2185 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2186 MIN_JOB_DESC_SIZE, 2187 (unsigned int *)priv->flc_desc[0].desc, 2188 &priv->flc_desc[0].desc[1], 1); 2189 2190 if (err < 0) { 2191 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2192 goto error_out; 2193 } 2194 if (priv->flc_desc[0].desc[1] & 1) { 2195 aeaddata.key_type = RTA_DATA_IMM; 2196 } else { 2197 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2198 aeaddata.key_type = RTA_DATA_PTR; 2199 } 2200 priv->flc_desc[0].desc[0] = 0; 2201 priv->flc_desc[0].desc[1] = 0; 2202 2203 if (session->dir == DIR_ENC) 2204 bufsize = cnstr_shdsc_gcm_encap( 2205 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2206 &aeaddata, session->iv.length, 2207 session->digest_length); 2208 else 2209 bufsize = cnstr_shdsc_gcm_decap( 2210 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2211 &aeaddata, session->iv.length, 2212 session->digest_length); 2213 if (bufsize < 0) { 2214 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2215 goto error_out; 2216 } 2217 2218 flc->word1_sdl = (uint8_t)bufsize; 2219 session->ctxt = priv; 2220 #ifdef CAAM_DESC_DEBUG 2221 int i; 2222 for (i = 0; i < bufsize; i++) 2223 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2224 i, priv->flc_desc[0].desc[i]); 2225 #endif 2226 return 0; 2227 2228 error_out: 2229 rte_free(session->aead_key.data); 2230 rte_free(priv); 2231 return -1; 2232 } 2233 2234 2235 static int 2236 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2237 struct rte_crypto_sym_xform *xform, 2238 dpaa2_sec_session *session) 2239 { 2240 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2241 struct alginfo authdata, cipherdata; 2242 int bufsize; 2243 struct ctxt_priv *priv; 2244 struct sec_flow_context *flc; 2245 struct rte_crypto_cipher_xform *cipher_xform; 2246 struct rte_crypto_auth_xform *auth_xform; 2247 int err; 2248 2249 PMD_INIT_FUNC_TRACE(); 2250 2251 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2252 cipher_xform = &xform->cipher; 2253 auth_xform = &xform->next->auth; 2254 session->ctxt_type = 2255 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2256 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2257 } else { 2258 cipher_xform = &xform->next->cipher; 2259 auth_xform = &xform->auth; 2260 session->ctxt_type = 2261 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2262 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2263 } 2264 2265 /* Set IV parameters */ 2266 session->iv.offset = cipher_xform->iv.offset; 2267 session->iv.length = cipher_xform->iv.length; 2268 2269 /* For SEC AEAD only one descriptor is required */ 2270 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2271 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2272 RTE_CACHE_LINE_SIZE); 2273 if (priv == NULL) { 2274 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2275 return -1; 2276 } 2277 2278 priv->fle_pool = dev_priv->fle_pool; 2279 flc = &priv->flc_desc[0].flc; 2280 2281 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2282 RTE_CACHE_LINE_SIZE); 2283 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2284 DPAA2_SEC_ERR("No Memory for cipher key"); 2285 rte_free(priv); 2286 return -1; 2287 } 2288 session->cipher_key.length = cipher_xform->key.length; 2289 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2290 RTE_CACHE_LINE_SIZE); 2291 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2292 DPAA2_SEC_ERR("No Memory for auth key"); 2293 rte_free(session->cipher_key.data); 2294 rte_free(priv); 2295 return -1; 2296 } 2297 session->auth_key.length = auth_xform->key.length; 2298 memcpy(session->cipher_key.data, cipher_xform->key.data, 2299 cipher_xform->key.length); 2300 memcpy(session->auth_key.data, auth_xform->key.data, 2301 auth_xform->key.length); 2302 2303 authdata.key = (size_t)session->auth_key.data; 2304 authdata.keylen = session->auth_key.length; 2305 authdata.key_enc_flags = 0; 2306 authdata.key_type = RTA_DATA_IMM; 2307 2308 session->digest_length = auth_xform->digest_length; 2309 2310 switch (auth_xform->algo) { 2311 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2312 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2313 authdata.algmode = OP_ALG_AAI_HMAC; 2314 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2315 break; 2316 case RTE_CRYPTO_AUTH_MD5_HMAC: 2317 authdata.algtype = OP_ALG_ALGSEL_MD5; 2318 authdata.algmode = OP_ALG_AAI_HMAC; 2319 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2320 break; 2321 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2322 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2323 authdata.algmode = OP_ALG_AAI_HMAC; 2324 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2325 break; 2326 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2327 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2328 authdata.algmode = OP_ALG_AAI_HMAC; 2329 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2330 break; 2331 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2332 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2333 authdata.algmode = OP_ALG_AAI_HMAC; 2334 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2335 break; 2336 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2337 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2338 authdata.algmode = OP_ALG_AAI_HMAC; 2339 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2340 break; 2341 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2342 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2343 case RTE_CRYPTO_AUTH_NULL: 2344 case RTE_CRYPTO_AUTH_SHA1: 2345 case RTE_CRYPTO_AUTH_SHA256: 2346 case RTE_CRYPTO_AUTH_SHA512: 2347 case RTE_CRYPTO_AUTH_SHA224: 2348 case RTE_CRYPTO_AUTH_SHA384: 2349 case RTE_CRYPTO_AUTH_MD5: 2350 case RTE_CRYPTO_AUTH_AES_GMAC: 2351 case RTE_CRYPTO_AUTH_KASUMI_F9: 2352 case RTE_CRYPTO_AUTH_AES_CMAC: 2353 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2354 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2355 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2356 auth_xform->algo); 2357 goto error_out; 2358 default: 2359 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2360 auth_xform->algo); 
2361 goto error_out; 2362 } 2363 cipherdata.key = (size_t)session->cipher_key.data; 2364 cipherdata.keylen = session->cipher_key.length; 2365 cipherdata.key_enc_flags = 0; 2366 cipherdata.key_type = RTA_DATA_IMM; 2367 2368 switch (cipher_xform->algo) { 2369 case RTE_CRYPTO_CIPHER_AES_CBC: 2370 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2371 cipherdata.algmode = OP_ALG_AAI_CBC; 2372 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2373 break; 2374 case RTE_CRYPTO_CIPHER_3DES_CBC: 2375 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2376 cipherdata.algmode = OP_ALG_AAI_CBC; 2377 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2378 break; 2379 case RTE_CRYPTO_CIPHER_AES_CTR: 2380 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2381 cipherdata.algmode = OP_ALG_AAI_CTR; 2382 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2383 break; 2384 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2385 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2386 case RTE_CRYPTO_CIPHER_NULL: 2387 case RTE_CRYPTO_CIPHER_3DES_ECB: 2388 case RTE_CRYPTO_CIPHER_AES_ECB: 2389 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2390 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2391 cipher_xform->algo); 2392 goto error_out; 2393 default: 2394 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2395 cipher_xform->algo); 2396 goto error_out; 2397 } 2398 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2399 DIR_ENC : DIR_DEC; 2400 2401 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2402 priv->flc_desc[0].desc[1] = authdata.keylen; 2403 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2404 MIN_JOB_DESC_SIZE, 2405 (unsigned int *)priv->flc_desc[0].desc, 2406 &priv->flc_desc[0].desc[2], 2); 2407 2408 if (err < 0) { 2409 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2410 goto error_out; 2411 } 2412 if (priv->flc_desc[0].desc[2] & 1) { 2413 cipherdata.key_type = RTA_DATA_IMM; 2414 } else { 2415 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2416 cipherdata.key_type = RTA_DATA_PTR; 2417 } 2418 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2419 authdata.key_type = RTA_DATA_IMM; 2420 } else { 2421 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2422 authdata.key_type = RTA_DATA_PTR; 2423 } 2424 priv->flc_desc[0].desc[0] = 0; 2425 priv->flc_desc[0].desc[1] = 0; 2426 priv->flc_desc[0].desc[2] = 0; 2427 2428 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2429 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2430 0, SHR_SERIAL, 2431 &cipherdata, &authdata, 2432 session->iv.length, 2433 session->digest_length, 2434 session->dir); 2435 if (bufsize < 0) { 2436 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2437 goto error_out; 2438 } 2439 } else { 2440 DPAA2_SEC_ERR("Hash before cipher not supported"); 2441 goto error_out; 2442 } 2443 2444 flc->word1_sdl = (uint8_t)bufsize; 2445 session->ctxt = priv; 2446 #ifdef CAAM_DESC_DEBUG 2447 int i; 2448 for (i = 0; i < bufsize; i++) 2449 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2450 i, priv->flc_desc[0].desc[i]); 2451 #endif 2452 2453 return 0; 2454 2455 error_out: 2456 rte_free(session->cipher_key.data); 2457 rte_free(session->auth_key.data); 2458 rte_free(priv); 2459 return -1; 2460 } 2461 2462 static int 2463 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2464 struct rte_crypto_sym_xform *xform, void *sess) 2465 { 2466 dpaa2_sec_session *session = sess; 2467 int ret; 2468 2469 PMD_INIT_FUNC_TRACE(); 2470 2471 if (unlikely(sess == NULL)) { 2472 DPAA2_SEC_ERR("Invalid session struct"); 2473 return -1; 2474 } 2475 2476 memset(session, 0, sizeof(dpaa2_sec_session)); 2477 /* Default IV length = 
0 */ 2478 session->iv.length = 0; 2479 2480 /* Cipher Only */ 2481 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2482 ret = dpaa2_sec_cipher_init(dev, xform, session); 2483 2484 /* Authentication Only */ 2485 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2486 xform->next == NULL) { 2487 ret = dpaa2_sec_auth_init(dev, xform, session); 2488 2489 /* Cipher then Authenticate */ 2490 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2491 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2492 session->ext_params.aead_ctxt.auth_cipher_text = true; 2493 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2494 ret = dpaa2_sec_auth_init(dev, xform, session); 2495 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2496 ret = dpaa2_sec_cipher_init(dev, xform, session); 2497 else 2498 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2499 /* Authenticate then Cipher */ 2500 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2501 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2502 session->ext_params.aead_ctxt.auth_cipher_text = false; 2503 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2504 ret = dpaa2_sec_cipher_init(dev, xform, session); 2505 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2506 ret = dpaa2_sec_auth_init(dev, xform, session); 2507 else 2508 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2509 /* AEAD operation for AES-GCM kind of Algorithms */ 2510 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2511 xform->next == NULL) { 2512 ret = dpaa2_sec_aead_init(dev, xform, session); 2513 2514 } else { 2515 DPAA2_SEC_ERR("Invalid crypto type"); 2516 return -EINVAL; 2517 } 2518 2519 return ret; 2520 } 2521 2522 #ifdef RTE_LIBRTE_SECURITY 2523 static int 2524 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2525 dpaa2_sec_session *session, 2526 struct alginfo *aeaddata) 2527 { 2528 PMD_INIT_FUNC_TRACE(); 2529 2530 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2531 RTE_CACHE_LINE_SIZE); 2532 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2533 DPAA2_SEC_ERR("No Memory for aead key"); 2534 return -1; 2535 } 2536 memcpy(session->aead_key.data, aead_xform->key.data, 2537 aead_xform->key.length); 2538 2539 session->digest_length = aead_xform->digest_length; 2540 session->aead_key.length = aead_xform->key.length; 2541 2542 aeaddata->key = (size_t)session->aead_key.data; 2543 aeaddata->keylen = session->aead_key.length; 2544 aeaddata->key_enc_flags = 0; 2545 aeaddata->key_type = RTA_DATA_IMM; 2546 2547 switch (aead_xform->algo) { 2548 case RTE_CRYPTO_AEAD_AES_GCM: 2549 switch (session->digest_length) { 2550 case 8: 2551 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2552 break; 2553 case 12: 2554 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2555 break; 2556 case 16: 2557 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2558 break; 2559 default: 2560 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2561 session->digest_length); 2562 return -1; 2563 } 2564 aeaddata->algmode = OP_ALG_AAI_GCM; 2565 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2566 break; 2567 case RTE_CRYPTO_AEAD_AES_CCM: 2568 switch (session->digest_length) { 2569 case 8: 2570 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2571 break; 2572 case 12: 2573 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2574 break; 2575 case 16: 2576 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2577 break; 2578 default: 2579 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2580 session->digest_length); 2581 return -1; 2582 } 2583 
aeaddata->algmode = OP_ALG_AAI_CCM;
2584		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2585		break;
2586	default:
2587		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2588			      aead_xform->algo);
2589		return -1;
2590	}
2591	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2592				DIR_ENC : DIR_DEC;
2593
2594	return 0;
2595	}
2596
2597	static int
2598	dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2599			struct rte_crypto_auth_xform *auth_xform,
2600			dpaa2_sec_session *session,
2601			struct alginfo *cipherdata,
2602			struct alginfo *authdata)
2603	{
2604		if (cipher_xform) {
2605			session->cipher_key.data = rte_zmalloc(NULL,
2606						cipher_xform->key.length,
2607						RTE_CACHE_LINE_SIZE);
2608			if (session->cipher_key.data == NULL &&
2609					cipher_xform->key.length > 0) {
2610				DPAA2_SEC_ERR("No Memory for cipher key");
2611				return -ENOMEM;
2612			}
2613
2614			session->cipher_key.length = cipher_xform->key.length;
2615			memcpy(session->cipher_key.data, cipher_xform->key.data,
2616			       cipher_xform->key.length);
2617			session->cipher_alg = cipher_xform->algo;
2618		} else {
2619			session->cipher_key.data = NULL;
2620			session->cipher_key.length = 0;
2621			session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2622		}
2623
2624		if (auth_xform) {
2625			session->auth_key.data = rte_zmalloc(NULL,
2626						auth_xform->key.length,
2627						RTE_CACHE_LINE_SIZE);
2628			if (session->auth_key.data == NULL &&
2629					auth_xform->key.length > 0) {
2630				DPAA2_SEC_ERR("No Memory for auth key");
2631				return -ENOMEM;
2632			}
2633			session->auth_key.length = auth_xform->key.length;
2634			memcpy(session->auth_key.data, auth_xform->key.data,
2635			       auth_xform->key.length);
2636			session->auth_alg = auth_xform->algo;
2637			session->digest_length = auth_xform->digest_length;
2638		} else {
2639			session->auth_key.data = NULL;
2640			session->auth_key.length = 0;
2641			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2642		}
2643
2644		authdata->key = (size_t)session->auth_key.data;
2645		authdata->keylen = session->auth_key.length;
2646		authdata->key_enc_flags = 0;
2647		authdata->key_type = RTA_DATA_IMM;
2648		switch (session->auth_alg) {
2649		case RTE_CRYPTO_AUTH_SHA1_HMAC:
2650			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2651			authdata->algmode = OP_ALG_AAI_HMAC;
2652			break;
2653		case RTE_CRYPTO_AUTH_MD5_HMAC:
2654			authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2655			authdata->algmode = OP_ALG_AAI_HMAC;
2656			break;
2657		case RTE_CRYPTO_AUTH_SHA256_HMAC:
2658			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2659			authdata->algmode = OP_ALG_AAI_HMAC;
2660			if (session->digest_length != 16)
2661				DPAA2_SEC_WARN(
2662				"+++Using sha256-hmac truncated len is non-standard, "
2663				"it will not work with lookaside proto");
2664			break;
2665		case RTE_CRYPTO_AUTH_SHA384_HMAC:
2666			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2667			authdata->algmode = OP_ALG_AAI_HMAC;
2668			break;
2669		case RTE_CRYPTO_AUTH_SHA512_HMAC:
2670			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2671			authdata->algmode = OP_ALG_AAI_HMAC;
2672			break;
2673		case RTE_CRYPTO_AUTH_AES_CMAC:
2674			authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2675			break;
2676		case RTE_CRYPTO_AUTH_NULL:
2677			authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2678			break;
2679		case RTE_CRYPTO_AUTH_SHA224_HMAC:
2680		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2681		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2682		case RTE_CRYPTO_AUTH_SHA1:
2683		case RTE_CRYPTO_AUTH_SHA256:
2684		case RTE_CRYPTO_AUTH_SHA512:
2685		case RTE_CRYPTO_AUTH_SHA224:
2686		case RTE_CRYPTO_AUTH_SHA384:
2687		case RTE_CRYPTO_AUTH_MD5:
2688		case
RTE_CRYPTO_AUTH_AES_GMAC: 2689 case RTE_CRYPTO_AUTH_KASUMI_F9: 2690 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2691 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2692 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2693 session->auth_alg); 2694 return -1; 2695 default: 2696 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2697 session->auth_alg); 2698 return -1; 2699 } 2700 cipherdata->key = (size_t)session->cipher_key.data; 2701 cipherdata->keylen = session->cipher_key.length; 2702 cipherdata->key_enc_flags = 0; 2703 cipherdata->key_type = RTA_DATA_IMM; 2704 2705 switch (session->cipher_alg) { 2706 case RTE_CRYPTO_CIPHER_AES_CBC: 2707 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2708 cipherdata->algmode = OP_ALG_AAI_CBC; 2709 break; 2710 case RTE_CRYPTO_CIPHER_3DES_CBC: 2711 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2712 cipherdata->algmode = OP_ALG_AAI_CBC; 2713 break; 2714 case RTE_CRYPTO_CIPHER_AES_CTR: 2715 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2716 cipherdata->algmode = OP_ALG_AAI_CTR; 2717 break; 2718 case RTE_CRYPTO_CIPHER_NULL: 2719 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2720 break; 2721 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2722 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2723 case RTE_CRYPTO_CIPHER_3DES_ECB: 2724 case RTE_CRYPTO_CIPHER_AES_ECB: 2725 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2726 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2727 session->cipher_alg); 2728 return -1; 2729 default: 2730 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2731 session->cipher_alg); 2732 return -1; 2733 } 2734 2735 return 0; 2736 } 2737 2738 #ifdef RTE_LIBRTE_SECURITY_TEST 2739 static uint8_t aes_cbc_iv[] = { 2740 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2741 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2742 #endif 2743 2744 static int 2745 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2746 struct rte_security_session_conf *conf, 2747 void *sess) 2748 { 2749 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2750 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2751 struct rte_crypto_auth_xform *auth_xform = NULL; 2752 struct rte_crypto_aead_xform *aead_xform = NULL; 2753 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2754 struct ctxt_priv *priv; 2755 struct alginfo authdata, cipherdata; 2756 int bufsize; 2757 struct sec_flow_context *flc; 2758 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2759 int ret = -1; 2760 2761 PMD_INIT_FUNC_TRACE(); 2762 2763 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2764 sizeof(struct ctxt_priv) + 2765 sizeof(struct sec_flc_desc), 2766 RTE_CACHE_LINE_SIZE); 2767 2768 if (priv == NULL) { 2769 DPAA2_SEC_ERR("No memory for priv CTXT"); 2770 return -ENOMEM; 2771 } 2772 2773 priv->fle_pool = dev_priv->fle_pool; 2774 flc = &priv->flc_desc[0].flc; 2775 2776 memset(session, 0, sizeof(dpaa2_sec_session)); 2777 2778 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2779 cipher_xform = &conf->crypto_xform->cipher; 2780 if (conf->crypto_xform->next) 2781 auth_xform = &conf->crypto_xform->next->auth; 2782 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2783 session, &cipherdata, &authdata); 2784 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2785 auth_xform = &conf->crypto_xform->auth; 2786 if (conf->crypto_xform->next) 2787 cipher_xform = &conf->crypto_xform->next->cipher; 2788 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2789 session, &cipherdata, &authdata); 2790 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2791 aead_xform = 
&conf->crypto_xform->aead; 2792 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2793 session, &cipherdata); 2794 authdata.keylen = 0; 2795 authdata.algtype = 0; 2796 } else { 2797 DPAA2_SEC_ERR("XFORM not specified"); 2798 ret = -EINVAL; 2799 goto out; 2800 } 2801 if (ret) { 2802 DPAA2_SEC_ERR("Failed to process xform"); 2803 goto out; 2804 } 2805 2806 session->ctxt_type = DPAA2_SEC_IPSEC; 2807 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2808 uint8_t *hdr = NULL; 2809 struct ip ip4_hdr; 2810 struct rte_ipv6_hdr ip6_hdr; 2811 struct ipsec_encap_pdb encap_pdb; 2812 2813 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2814 /* For Sec Proto only one descriptor is required. */ 2815 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2816 2817 /* copy algo specific data to PDB */ 2818 switch (cipherdata.algtype) { 2819 case OP_PCL_IPSEC_AES_CTR: 2820 encap_pdb.ctr.ctr_initial = 0x00000001; 2821 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2822 break; 2823 case OP_PCL_IPSEC_AES_GCM8: 2824 case OP_PCL_IPSEC_AES_GCM12: 2825 case OP_PCL_IPSEC_AES_GCM16: 2826 memcpy(encap_pdb.gcm.salt, 2827 (uint8_t *)&(ipsec_xform->salt), 4); 2828 break; 2829 } 2830 2831 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2832 PDBOPTS_ESP_OIHI_PDB_INL | 2833 PDBOPTS_ESP_IVSRC | 2834 PDBHMO_ESP_ENCAP_DTTL | 2835 PDBHMO_ESP_SNR; 2836 if (ipsec_xform->options.esn) 2837 encap_pdb.options |= PDBOPTS_ESP_ESN; 2838 encap_pdb.spi = ipsec_xform->spi; 2839 session->dir = DIR_ENC; 2840 if (ipsec_xform->tunnel.type == 2841 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2842 encap_pdb.ip_hdr_len = sizeof(struct ip); 2843 ip4_hdr.ip_v = IPVERSION; 2844 ip4_hdr.ip_hl = 5; 2845 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2846 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2847 ip4_hdr.ip_id = 0; 2848 ip4_hdr.ip_off = 0; 2849 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2850 ip4_hdr.ip_p = IPPROTO_ESP; 2851 ip4_hdr.ip_sum = 0; 2852 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2853 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2854 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2855 &ip4_hdr, sizeof(struct ip)); 2856 hdr = (uint8_t *)&ip4_hdr; 2857 } else if (ipsec_xform->tunnel.type == 2858 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2859 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2860 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2861 ((ipsec_xform->tunnel.ipv6.dscp << 2862 RTE_IPV6_HDR_TC_SHIFT) & 2863 RTE_IPV6_HDR_TC_MASK) | 2864 ((ipsec_xform->tunnel.ipv6.flabel << 2865 RTE_IPV6_HDR_FL_SHIFT) & 2866 RTE_IPV6_HDR_FL_MASK)); 2867 /* Payload length will be updated by HW */ 2868 ip6_hdr.payload_len = 0; 2869 ip6_hdr.hop_limits = 2870 ipsec_xform->tunnel.ipv6.hlimit; 2871 ip6_hdr.proto = (ipsec_xform->proto == 2872 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2873 IPPROTO_ESP : IPPROTO_AH; 2874 memcpy(&ip6_hdr.src_addr, 2875 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2876 memcpy(&ip6_hdr.dst_addr, 2877 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2878 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2879 hdr = (uint8_t *)&ip6_hdr; 2880 } 2881 2882 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2883 1, 0, SHR_SERIAL, &encap_pdb, 2884 hdr, &cipherdata, &authdata); 2885 } else if (ipsec_xform->direction == 2886 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2887 struct ipsec_decap_pdb decap_pdb; 2888 2889 flc->dhr = SEC_FLC_DHR_INBOUND; 2890 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2891 /* copy algo specific data to PDB */ 2892 switch (cipherdata.algtype) { 2893 case OP_PCL_IPSEC_AES_CTR: 2894 decap_pdb.ctr.ctr_initial = 0x00000001; 2895 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2896 break; 2897 case OP_PCL_IPSEC_AES_GCM8: 2898 case OP_PCL_IPSEC_AES_GCM12: 2899 case OP_PCL_IPSEC_AES_GCM16: 2900 memcpy(decap_pdb.gcm.salt, 2901 (uint8_t *)&(ipsec_xform->salt), 4); 2902 break; 2903 } 2904 2905 decap_pdb.options = (ipsec_xform->tunnel.type == 2906 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 2907 sizeof(struct ip) << 16 : 2908 sizeof(struct rte_ipv6_hdr) << 16; 2909 if (ipsec_xform->options.esn) 2910 decap_pdb.options |= PDBOPTS_ESP_ESN; 2911 2912 if (ipsec_xform->replay_win_sz) { 2913 uint32_t win_sz; 2914 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2915 2916 switch (win_sz) { 2917 case 1: 2918 case 2: 2919 case 4: 2920 case 8: 2921 case 16: 2922 case 32: 2923 decap_pdb.options |= PDBOPTS_ESP_ARS32; 2924 break; 2925 case 64: 2926 decap_pdb.options |= PDBOPTS_ESP_ARS64; 2927 break; 2928 default: 2929 decap_pdb.options |= PDBOPTS_ESP_ARS128; 2930 } 2931 } 2932 session->dir = DIR_DEC; 2933 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2934 1, 0, SHR_SERIAL, 2935 &decap_pdb, &cipherdata, &authdata); 2936 } else 2937 goto out; 2938 2939 if (bufsize < 0) { 2940 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2941 goto out; 2942 } 2943 2944 flc->word1_sdl = (uint8_t)bufsize; 2945 2946 /* Enable the stashing control bit */ 2947 DPAA2_SET_FLC_RSC(flc); 2948 flc->word2_rflc_31_0 = lower_32_bits( 2949 (size_t)&(((struct dpaa2_sec_qp *) 2950 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2951 flc->word3_rflc_63_32 = upper_32_bits( 2952 (size_t)&(((struct dpaa2_sec_qp *) 2953 dev->data->queue_pairs[0])->rx_vq)); 2954 2955 /* Set EWS bit i.e. 
enable write-safe */ 2956 DPAA2_SET_FLC_EWS(flc); 2957 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2958 DPAA2_SET_FLC_REUSE_BS(flc); 2959 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2960 DPAA2_SET_FLC_REUSE_FF(flc); 2961 2962 session->ctxt = priv; 2963 2964 return 0; 2965 out: 2966 rte_free(session->auth_key.data); 2967 rte_free(session->cipher_key.data); 2968 rte_free(priv); 2969 return ret; 2970 } 2971 2972 static int 2973 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2974 struct rte_security_session_conf *conf, 2975 void *sess) 2976 { 2977 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2978 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2979 struct rte_crypto_auth_xform *auth_xform = NULL; 2980 struct rte_crypto_cipher_xform *cipher_xform; 2981 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2982 struct ctxt_priv *priv; 2983 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2984 struct alginfo authdata, cipherdata; 2985 struct alginfo *p_authdata = NULL; 2986 int bufsize = -1; 2987 struct sec_flow_context *flc; 2988 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2989 int swap = true; 2990 #else 2991 int swap = false; 2992 #endif 2993 2994 PMD_INIT_FUNC_TRACE(); 2995 2996 memset(session, 0, sizeof(dpaa2_sec_session)); 2997 2998 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2999 sizeof(struct ctxt_priv) + 3000 sizeof(struct sec_flc_desc), 3001 RTE_CACHE_LINE_SIZE); 3002 3003 if (priv == NULL) { 3004 DPAA2_SEC_ERR("No memory for priv CTXT"); 3005 return -ENOMEM; 3006 } 3007 3008 priv->fle_pool = dev_priv->fle_pool; 3009 flc = &priv->flc_desc[0].flc; 3010 3011 /* find xfrm types */ 3012 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 3013 cipher_xform = &xform->cipher; 3014 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 3015 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3016 session->ext_params.aead_ctxt.auth_cipher_text = true; 3017 cipher_xform = &xform->cipher; 3018 auth_xform = &xform->next->auth; 3019 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 3020 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3021 session->ext_params.aead_ctxt.auth_cipher_text = false; 3022 cipher_xform = &xform->next->cipher; 3023 auth_xform = &xform->auth; 3024 } else { 3025 DPAA2_SEC_ERR("Invalid crypto type"); 3026 return -EINVAL; 3027 } 3028 3029 session->ctxt_type = DPAA2_SEC_PDCP; 3030 if (cipher_xform) { 3031 session->cipher_key.data = rte_zmalloc(NULL, 3032 cipher_xform->key.length, 3033 RTE_CACHE_LINE_SIZE); 3034 if (session->cipher_key.data == NULL && 3035 cipher_xform->key.length > 0) { 3036 DPAA2_SEC_ERR("No Memory for cipher key"); 3037 rte_free(priv); 3038 return -ENOMEM; 3039 } 3040 session->cipher_key.length = cipher_xform->key.length; 3041 memcpy(session->cipher_key.data, cipher_xform->key.data, 3042 cipher_xform->key.length); 3043 session->dir = 3044 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
3045				DIR_ENC : DIR_DEC;
3046		session->cipher_alg = cipher_xform->algo;
3047	} else {
3048		session->cipher_key.data = NULL;
3049		session->cipher_key.length = 0;
3050		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3051		session->dir = DIR_ENC;
3052	}
3053
3054	session->pdcp.domain = pdcp_xform->domain;
3055	session->pdcp.bearer = pdcp_xform->bearer;
3056	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3057	session->pdcp.sn_size = pdcp_xform->sn_size;
3058	session->pdcp.hfn = pdcp_xform->hfn;
3059	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3060	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3061	/* The HFN override offset location is stored in the iv.offset value */
3062	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3063
3064	cipherdata.key = (size_t)session->cipher_key.data;
3065	cipherdata.keylen = session->cipher_key.length;
3066	cipherdata.key_enc_flags = 0;
3067	cipherdata.key_type = RTA_DATA_IMM;
3068
3069	switch (session->cipher_alg) {
3070	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3071		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3072		break;
3073	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3074		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3075		break;
3076	case RTE_CRYPTO_CIPHER_AES_CTR:
3077		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3078		break;
3079	case RTE_CRYPTO_CIPHER_NULL:
3080		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3081		break;
3082	default:
3083		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3084			      session->cipher_alg);
3085		goto out;
3086	}
3087
3088	if (auth_xform) {
3089		session->auth_key.data = rte_zmalloc(NULL,
3090					auth_xform->key.length,
3091					RTE_CACHE_LINE_SIZE);
3092		if (!session->auth_key.data &&
3093		    auth_xform->key.length > 0) {
3094			DPAA2_SEC_ERR("No Memory for auth key");
3095			rte_free(session->cipher_key.data);
3096			rte_free(priv);
3097			return -ENOMEM;
3098		}
3099		session->auth_key.length = auth_xform->key.length;
3100		memcpy(session->auth_key.data, auth_xform->key.data,
3101		       auth_xform->key.length);
3102		session->auth_alg = auth_xform->algo;
3103	} else {
3104		session->auth_key.data = NULL;
3105		session->auth_key.length = 0;
3106		session->auth_alg = 0;
3107	}
3108	authdata.key = (size_t)session->auth_key.data;
3109	authdata.keylen = session->auth_key.length;
3110	authdata.key_enc_flags = 0;
3111	authdata.key_type = RTA_DATA_IMM;
3112
3113	if (session->auth_alg) {
3114		switch (session->auth_alg) {
3115		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3116			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3117			break;
3118		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3119			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3120			break;
3121		case RTE_CRYPTO_AUTH_AES_CMAC:
3122			authdata.algtype = PDCP_AUTH_TYPE_AES;
3123			break;
3124		case RTE_CRYPTO_AUTH_NULL:
3125			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3126			break;
3127		default:
3128			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3129				      session->auth_alg);
3130			goto out;
3131		}
3132
3133		p_authdata = &authdata;
3134	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3135		DPAA2_SEC_ERR("Crypto: Integrity protection is mandatory for c-plane");
3136		goto out;
3137	}
3138
3139	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3140		if (session->dir == DIR_ENC)
3141			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3142					priv->flc_desc[0].desc, 1, swap,
3143					pdcp_xform->hfn,
3144					session->pdcp.sn_size,
3145					pdcp_xform->bearer,
3146					pdcp_xform->pkt_dir,
3147					pdcp_xform->hfn_threshold,
3148					&cipherdata, &authdata,
3149					0);
3150		else if (session->dir == DIR_DEC)
3151			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3152					priv->flc_desc[0].desc, 1, swap,
3153					pdcp_xform->hfn,
3154
session->pdcp.sn_size,
3155					pdcp_xform->bearer,
3156					pdcp_xform->pkt_dir,
3157					pdcp_xform->hfn_threshold,
3158					&cipherdata, &authdata,
3159					0);
3160	} else {
3161		if (session->dir == DIR_ENC)
3162			bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3163					priv->flc_desc[0].desc, 1, swap,
3164					session->pdcp.sn_size,
3165					pdcp_xform->hfn,
3166					pdcp_xform->bearer,
3167					pdcp_xform->pkt_dir,
3168					pdcp_xform->hfn_threshold,
3169					&cipherdata, p_authdata, 0);
3170		else if (session->dir == DIR_DEC)
3171			bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3172					priv->flc_desc[0].desc, 1, swap,
3173					session->pdcp.sn_size,
3174					pdcp_xform->hfn,
3175					pdcp_xform->bearer,
3176					pdcp_xform->pkt_dir,
3177					pdcp_xform->hfn_threshold,
3178					&cipherdata, p_authdata, 0);
3179	}
3180
3181	if (bufsize < 0) {
3182		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3183		goto out;
3184	}
3185
3186	/* Enable the stashing control bit */
3187	DPAA2_SET_FLC_RSC(flc);
3188	flc->word2_rflc_31_0 = lower_32_bits(
3189			(size_t)&(((struct dpaa2_sec_qp *)
3190			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3191	flc->word3_rflc_63_32 = upper_32_bits(
3192			(size_t)&(((struct dpaa2_sec_qp *)
3193			dev->data->queue_pairs[0])->rx_vq));
3194
3195	flc->word1_sdl = (uint8_t)bufsize;
3196
3197	/* TODO - check the perf impact or
3198	 * align as per descriptor type
3199	 * Set EWS bit i.e. enable write-safe
3200	 * DPAA2_SET_FLC_EWS(flc);
3201	 */
3202
3203	/* Set BS = 1 i.e reuse input buffers as output buffers */
3204	DPAA2_SET_FLC_REUSE_BS(flc);
3205	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3206	DPAA2_SET_FLC_REUSE_FF(flc);
3207
3208	session->ctxt = priv;
3209
3210	return 0;
3211	out:
3212	rte_free(session->auth_key.data);
3213	rte_free(session->cipher_key.data);
3214	rte_free(priv);
3215	return -1;
3216	}
3217
3218	static int
3219	dpaa2_sec_security_session_create(void *dev,
3220			struct rte_security_session_conf *conf,
3221			struct rte_security_session *sess,
3222			struct rte_mempool *mempool)
3223	{
3224		void *sess_private_data;
3225		struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3226		int ret;
3227
3228		if (rte_mempool_get(mempool, &sess_private_data)) {
3229			DPAA2_SEC_ERR("Couldn't get object from session mempool");
3230			return -ENOMEM;
3231		}
3232
3233		switch (conf->protocol) {
3234		case RTE_SECURITY_PROTOCOL_IPSEC:
3235			ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3236					sess_private_data);
3237			break;
3238		case RTE_SECURITY_PROTOCOL_MACSEC:
			/* MACsec is not supported; hand the unused object back */
			rte_mempool_put(mempool, sess_private_data);
3239			return -ENOTSUP;
3240		case RTE_SECURITY_PROTOCOL_PDCP:
3241			ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3242					sess_private_data);
3243			break;
3244		default:
			/* Unknown protocol; hand the unused object back */
			rte_mempool_put(mempool, sess_private_data);
3245			return -EINVAL;
3246		}
3247		if (ret != 0) {
3248			DPAA2_SEC_ERR("Failed to configure session parameters");
3249			/* Return session to mempool */
3250			rte_mempool_put(mempool, sess_private_data);
3251			return ret;
3252		}
3253
3254		set_sec_session_private_data(sess, sess_private_data);
3255
3256		return ret;
3257	}
3258
3259	/** Clear the memory of session so it doesn't leave key material behind */
3260	static int
3261	dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3262			struct rte_security_session *sess)
3263	{
3264		PMD_INIT_FUNC_TRACE();
3265		void *sess_priv = get_sec_session_private_data(sess);
3266
3267		dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3268
3269		if (sess_priv) {
3270			struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3271
3272			rte_free(s->ctxt);
3273			rte_free(s->cipher_key.data);
3274			rte_free(s->auth_key.data);
3275			memset(s, 0, sizeof(dpaa2_sec_session));
3276
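/* Detach the private data from the security session before the object
 * goes back to its mempool, so a stale pointer cannot be picked up by
 * the next user of the object.
 */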
set_sec_session_private_data(sess, NULL); 3277 rte_mempool_put(sess_mp, sess_priv); 3278 } 3279 return 0; 3280 } 3281 #endif 3282 static int 3283 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3284 struct rte_crypto_sym_xform *xform, 3285 struct rte_cryptodev_sym_session *sess, 3286 struct rte_mempool *mempool) 3287 { 3288 void *sess_private_data; 3289 int ret; 3290 3291 if (rte_mempool_get(mempool, &sess_private_data)) { 3292 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3293 return -ENOMEM; 3294 } 3295 3296 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3297 if (ret != 0) { 3298 DPAA2_SEC_ERR("Failed to configure session parameters"); 3299 /* Return session to mempool */ 3300 rte_mempool_put(mempool, sess_private_data); 3301 return ret; 3302 } 3303 3304 set_sym_session_private_data(sess, dev->driver_id, 3305 sess_private_data); 3306 3307 return 0; 3308 } 3309 3310 /** Clear the memory of session so it doesn't leave key material behind */ 3311 static void 3312 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3313 struct rte_cryptodev_sym_session *sess) 3314 { 3315 PMD_INIT_FUNC_TRACE(); 3316 uint8_t index = dev->driver_id; 3317 void *sess_priv = get_sym_session_private_data(sess, index); 3318 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3319 3320 if (sess_priv) { 3321 rte_free(s->ctxt); 3322 rte_free(s->cipher_key.data); 3323 rte_free(s->auth_key.data); 3324 memset(s, 0, sizeof(dpaa2_sec_session)); 3325 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3326 set_sym_session_private_data(sess, index, NULL); 3327 rte_mempool_put(sess_mp, sess_priv); 3328 } 3329 } 3330 3331 static int 3332 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3333 struct rte_cryptodev_config *config __rte_unused) 3334 { 3335 PMD_INIT_FUNC_TRACE(); 3336 3337 return 0; 3338 } 3339 3340 static int 3341 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3342 { 3343 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3344 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3345 struct dpseci_attr attr; 3346 struct dpaa2_queue *dpaa2_q; 3347 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3348 dev->data->queue_pairs; 3349 struct dpseci_rx_queue_attr rx_attr; 3350 struct dpseci_tx_queue_attr tx_attr; 3351 int ret, i; 3352 3353 PMD_INIT_FUNC_TRACE(); 3354 3355 memset(&attr, 0, sizeof(struct dpseci_attr)); 3356 3357 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3358 if (ret) { 3359 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3360 priv->hw_id); 3361 goto get_attr_failure; 3362 } 3363 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3364 if (ret) { 3365 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3366 goto get_attr_failure; 3367 } 3368 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3369 dpaa2_q = &qp[i]->rx_vq; 3370 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3371 &rx_attr); 3372 dpaa2_q->fqid = rx_attr.fqid; 3373 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3374 } 3375 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3376 dpaa2_q = &qp[i]->tx_vq; 3377 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3378 &tx_attr); 3379 dpaa2_q->fqid = tx_attr.fqid; 3380 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3381 } 3382 3383 return 0; 3384 get_attr_failure: 3385 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3386 return -1; 3387 } 3388 3389 static void 3390 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3391 { 3392 struct 
dpaa2_sec_dev_private *priv = dev->data->dev_private;
3393	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3394	int ret;
3395
3396	PMD_INIT_FUNC_TRACE();
3397
3398	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3399	if (ret) {
3400		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3401			      priv->hw_id);
3402		return;
3403	}
3404
3405	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3406	if (ret < 0) {
3407		DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %x", ret);
3408		return;
3409	}
3410	}
3411
3412	static int
3413	dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3414	{
3415		struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3416		struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3417		int ret;
3418
3419		PMD_INIT_FUNC_TRACE();
3420
3421		/* Function is the reverse of dpaa2_sec_dev_init.
3422		 * It does the following:
3423		 * 1. Detach the DPSECI from attached resources, i.e. buffer pools, dpbp_id
3424		 * 2. Close the DPSECI device
3425		 * 3. Free the allocated resources.
3426		 */
3427
3428		/* Close the device at the underlying layer */
3429		ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3430		if (ret) {
3431			DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3432			return -1;
3433		}
3434
3435		/* Free the memory allocated for the dpseci object */
3436		priv->hw = NULL;
3437		rte_free(dpseci);
3438
3439		return 0;
3440	}
3441
3442	static void
3443	dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3444			struct rte_cryptodev_info *info)
3445	{
3446		struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3447
3448		PMD_INIT_FUNC_TRACE();
3449		if (info != NULL) {
3450			info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3451			info->feature_flags = dev->feature_flags;
3452			info->capabilities = dpaa2_sec_capabilities;
3453			/* No limit on the number of sessions */
3454			info->sym.max_nb_sessions = 0;
3455			info->driver_id = cryptodev_driver_id;
3456		}
3457	}
3458
3459	static
3460	void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3461			struct rte_cryptodev_stats *stats)
3462	{
3463		struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3464		struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3465		struct dpseci_sec_counters counters = {0};
3466		struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3467					dev->data->queue_pairs;
3468		int ret, i;
3469
3470		PMD_INIT_FUNC_TRACE();
3471		if (stats == NULL) {
3472			DPAA2_SEC_ERR("Invalid stats ptr NULL");
3473			return;
3474		}
3475		for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3476			if (qp[i] == NULL) {
3477				DPAA2_SEC_DEBUG("Uninitialised queue pair");
3478				continue;
3479			}
3480
3481			stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3482			stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3483			stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3484			stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3485		}
3486
3487		ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3488					      &counters);
3489		if (ret) {
3490			DPAA2_SEC_ERR("Reading SEC counters failed");
3491		} else {
3492			DPAA2_SEC_INFO("dpseci hardware stats:"
3493				"\n\tNum of Requests Dequeued = %" PRIu64
3494				"\n\tNum of Outbound Encrypt Requests = %" PRIu64
3495				"\n\tNum of Inbound Decrypt Requests = %" PRIu64
3496				"\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3497				"\n\tNum of Outbound Bytes Protected = %" PRIu64
3498				"\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3499				"\n\tNum of Inbound Bytes Validated = %" PRIu64,
3500				counters.dequeued_requests,
3501				counters.ob_enc_requests,
3502				counters.ib_dec_requests,
3503				counters.ob_enc_bytes,
3504
counters.ob_prot_bytes, 3505 counters.ib_dec_bytes, 3506 counters.ib_valid_bytes); 3507 } 3508 } 3509 3510 static 3511 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3512 { 3513 int i; 3514 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3515 (dev->data->queue_pairs); 3516 3517 PMD_INIT_FUNC_TRACE(); 3518 3519 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3520 if (qp[i] == NULL) { 3521 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3522 continue; 3523 } 3524 qp[i]->tx_vq.rx_pkts = 0; 3525 qp[i]->tx_vq.tx_pkts = 0; 3526 qp[i]->tx_vq.err_pkts = 0; 3527 qp[i]->rx_vq.rx_pkts = 0; 3528 qp[i]->rx_vq.tx_pkts = 0; 3529 qp[i]->rx_vq.err_pkts = 0; 3530 } 3531 } 3532 3533 static void __attribute__((hot)) 3534 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3535 const struct qbman_fd *fd, 3536 const struct qbman_result *dq, 3537 struct dpaa2_queue *rxq, 3538 struct rte_event *ev) 3539 { 3540 /* Prefetching mbuf */ 3541 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3542 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3543 3544 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3545 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3546 3547 ev->flow_id = rxq->ev.flow_id; 3548 ev->sub_event_type = rxq->ev.sub_event_type; 3549 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3550 ev->op = RTE_EVENT_OP_NEW; 3551 ev->sched_type = rxq->ev.sched_type; 3552 ev->queue_id = rxq->ev.queue_id; 3553 ev->priority = rxq->ev.priority; 3554 ev->event_ptr = sec_fd_to_mbuf(fd); 3555 3556 qbman_swp_dqrr_consume(swp, dq); 3557 } 3558 static void 3559 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 3560 const struct qbman_fd *fd, 3561 const struct qbman_result *dq, 3562 struct dpaa2_queue *rxq, 3563 struct rte_event *ev) 3564 { 3565 uint8_t dqrr_index; 3566 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3567 /* Prefetching mbuf */ 3568 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3569 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3570 3571 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3572 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3573 3574 ev->flow_id = rxq->ev.flow_id; 3575 ev->sub_event_type = rxq->ev.sub_event_type; 3576 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3577 ev->op = RTE_EVENT_OP_NEW; 3578 ev->sched_type = rxq->ev.sched_type; 3579 ev->queue_id = rxq->ev.queue_id; 3580 ev->priority = rxq->ev.priority; 3581 3582 ev->event_ptr = sec_fd_to_mbuf(fd); 3583 dqrr_index = qbman_get_dqrr_idx(dq); 3584 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3585 DPAA2_PER_LCORE_DQRR_SIZE++; 3586 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3587 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3588 } 3589 3590 int 3591 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3592 int qp_id, 3593 struct dpaa2_dpcon_dev *dpcon, 3594 const struct rte_event *event) 3595 { 3596 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3597 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3598 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3599 struct dpseci_rx_queue_cfg cfg; 3600 uint8_t priority; 3601 int ret; 3602 3603 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3604 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3605 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3606 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3607 else 3608 return -EINVAL; 3609 3610 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 
3611 (dpcon->num_priorities - 1); 3612 3613 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3614 cfg.options = DPSECI_QUEUE_OPT_DEST; 3615 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3616 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 3617 cfg.dest_cfg.priority = priority; 3618 3619 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3620 cfg.user_ctx = (size_t)(qp); 3621 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3622 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3623 cfg.order_preservation_en = 1; 3624 } 3625 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3626 qp_id, &cfg); 3627 if (ret) { 3628 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3629 return ret; 3630 } 3631 3632 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3633 3634 return 0; 3635 } 3636 3637 int 3638 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3639 int qp_id) 3640 { 3641 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3642 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3643 struct dpseci_rx_queue_cfg cfg; 3644 int ret; 3645 3646 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3647 cfg.options = DPSECI_QUEUE_OPT_DEST; 3648 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3649 3650 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3651 qp_id, &cfg); 3652 if (ret) 3653 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3654 3655 return ret; 3656 } 3657 3658 static struct rte_cryptodev_ops crypto_ops = { 3659 .dev_configure = dpaa2_sec_dev_configure, 3660 .dev_start = dpaa2_sec_dev_start, 3661 .dev_stop = dpaa2_sec_dev_stop, 3662 .dev_close = dpaa2_sec_dev_close, 3663 .dev_infos_get = dpaa2_sec_dev_infos_get, 3664 .stats_get = dpaa2_sec_stats_get, 3665 .stats_reset = dpaa2_sec_stats_reset, 3666 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3667 .queue_pair_release = dpaa2_sec_queue_pair_release, 3668 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3669 .sym_session_configure = dpaa2_sec_sym_session_configure, 3670 .sym_session_clear = dpaa2_sec_sym_session_clear, 3671 }; 3672 3673 #ifdef RTE_LIBRTE_SECURITY 3674 static const struct rte_security_capability * 3675 dpaa2_sec_capabilities_get(void *device __rte_unused) 3676 { 3677 return dpaa2_sec_security_cap; 3678 } 3679 3680 static const struct rte_security_ops dpaa2_sec_security_ops = { 3681 .session_create = dpaa2_sec_security_session_create, 3682 .session_update = NULL, 3683 .session_stats_get = NULL, 3684 .session_destroy = dpaa2_sec_security_session_destroy, 3685 .set_pkt_metadata = NULL, 3686 .capabilities_get = dpaa2_sec_capabilities_get 3687 }; 3688 #endif 3689 3690 static int 3691 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3692 { 3693 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3694 3695 rte_free(dev->security_ctx); 3696 3697 rte_mempool_free(internals->fle_pool); 3698 3699 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3700 dev->data->name, rte_socket_id()); 3701 3702 return 0; 3703 } 3704 3705 static int 3706 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3707 { 3708 struct dpaa2_sec_dev_private *internals; 3709 struct rte_device *dev = cryptodev->device; 3710 struct rte_dpaa2_device *dpaa2_dev; 3711 #ifdef RTE_LIBRTE_SECURITY 3712 struct rte_security_ctx *security_instance; 3713 #endif 3714 struct fsl_mc_io *dpseci; 3715 uint16_t token; 3716 struct dpseci_attr attr; 3717 int retcode, hw_id; 3718 char str[30]; 3719 3720 PMD_INIT_FUNC_TRACE(); 3721 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 
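/* The init path below wires up the burst entry points and feature
 * flags for every process; only the primary process then goes on to
 * open the DPSECI object via MC and to create the per-device FLE pool.
 */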
3722	if (dpaa2_dev == NULL) {
3723		DPAA2_SEC_ERR("DPAA2 SEC device not found");
3724		return -1;
3725	}
3726	hw_id = dpaa2_dev->object_id;
3727
3728	cryptodev->driver_id = cryptodev_driver_id;
3729	cryptodev->dev_ops = &crypto_ops;
3730
3731	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3732	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3733	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3734			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3735			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3736			RTE_CRYPTODEV_FF_SECURITY |
3737			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3738			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3739			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3740			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3741			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3742
3743	internals = cryptodev->data->dev_private;
3744
3745	/*
3746	 * For secondary processes, we don't initialise any further as the
3747	 * primary has already done this work. Only check that we don't need
3748	 * a different RX function.
3749	 */
3750	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3751		DPAA2_SEC_DEBUG("Device already init by primary process");
3752		return 0;
3753	}
3754	#ifdef RTE_LIBRTE_SECURITY
3755	/* Initialize security_ctx only for the primary process */
3756	security_instance = rte_malloc("rte_security_instances_ops",
3757			sizeof(struct rte_security_ctx), 0);
3758	if (security_instance == NULL)
3759		return -ENOMEM;
3760	security_instance->device = (void *)cryptodev;
3761	security_instance->ops = &dpaa2_sec_security_ops;
3762	security_instance->sess_cnt = 0;
3763	cryptodev->security_ctx = security_instance;
3764	#endif
3765	/* Open the rte device via MC and save the handle for further use */
3766	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3767			sizeof(struct fsl_mc_io), 0);
3768	if (!dpseci) {
3769		DPAA2_SEC_ERR(
3770			"Error in allocating the memory for dpsec object");
3771		return -1;
3772	}
3773	dpseci->regs = rte_mcp_ptr_list[0];
3774
3775	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3776	if (retcode != 0) {
3777		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3778			      retcode);
3779		goto init_error;
3780	}
3781	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3782	if (retcode != 0) {
3783		DPAA2_SEC_ERR(
3784			"Cannot get dpsec device attributes: Error = %x",
3785			retcode);
3786		goto init_error;
3787	}
3788	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3789		 "dpsec-%u", hw_id);
3790
3791	internals->max_nb_queue_pairs = attr.num_tx_queues;
3792	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3793	internals->hw = dpseci;
3794	internals->token = token;
3795
3796	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3797		 getpid(), cryptodev->data->dev_id);
3798	internals->fle_pool = rte_mempool_create((const char *)str,
3799			FLE_POOL_NUM_BUFS,
3800			FLE_POOL_BUF_SIZE,
3801			FLE_POOL_CACHE_SIZE, 0,
3802			NULL, NULL, NULL, NULL,
3803			SOCKET_ID_ANY, 0);
3804	if (!internals->fle_pool) {
3805		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3806		goto init_error;
3807	}
3808
3809	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3810	return 0;
3811
3812	init_error:
3813	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3814
3815	/* dpaa2_sec_uninit(crypto_dev_name); */
3816	return -EFAULT;
3817	}
3818
3819	static int
3820	cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3821			struct rte_dpaa2_device *dpaa2_dev)
3822	{
3823		struct rte_cryptodev *cryptodev;
3824		char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3825
3826		int retval;
3827
3828
snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3829		 dpaa2_dev->object_id);
3830
3831	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3832	if (cryptodev == NULL)
3833		return -ENOMEM;
3834
3835	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3836		cryptodev->data->dev_private = rte_zmalloc_socket(
3837					"cryptodev private structure",
3838					sizeof(struct dpaa2_sec_dev_private),
3839					RTE_CACHE_LINE_SIZE,
3840					rte_socket_id());
3841
3842		if (cryptodev->data->dev_private == NULL)
3843			rte_panic("Cannot allocate memzone for private "
3844				  "device data");
3845	}
3846
3847	dpaa2_dev->cryptodev = cryptodev;
3848	cryptodev->device = &dpaa2_dev->device;
3849
3850	/* init user callbacks */
3851	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3852
3853	if (dpaa2_svr_family == SVR_LX2160A)
3854		rta_set_sec_era(RTA_SEC_ERA_10);
3855
3856	DPAA2_SEC_INFO("DPAA2 SEC ERA is %d", rta_get_sec_era());
3857
3858	/* Invoke PMD device initialization function */
3859	retval = dpaa2_sec_dev_init(cryptodev);
3860	if (retval == 0)
3861		return 0;
3862
3863	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3864		rte_free(cryptodev->data->dev_private);
3865
3866	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3867
3868	return -ENXIO;
3869	}
3870
3871	static int
3872	cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3873	{
3874		struct rte_cryptodev *cryptodev;
3875		int ret;
3876
3877		cryptodev = dpaa2_dev->cryptodev;
3878		if (cryptodev == NULL)
3879			return -ENODEV;
3880
3881		ret = dpaa2_sec_uninit(cryptodev);
3882		if (ret)
3883			return ret;
3884
3885		return rte_cryptodev_pmd_destroy(cryptodev);
3886	}
3887
3888	static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3889		.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3890		.drv_type = DPAA2_CRYPTO,
3891		.driver = {
3892			.name = "DPAA2 SEC PMD"
3893		},
3894		.probe = cryptodev_dpaa2_sec_probe,
3895		.remove = cryptodev_dpaa2_sec_remove,
3896	};
3897
3898	static struct cryptodev_driver dpaa2_sec_crypto_drv;
3899
3900	RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3901	RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3902			rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3903
3904	RTE_INIT(dpaa2_sec_init_log)
3905	{
3906		/* Crypto PMD level logs */
3907		dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3908		if (dpaa2_logtype_sec >= 0)
3909			rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3910	}
3911
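/*
 * Minimal usage sketch (illustrative only, not part of the driver):
 * how an application of this DPDK vintage would typically drive the
 * PMD once a "dpsec-<id>" device has been probed. dev_id, qp_id, op,
 * xform, sess_mp and priv_mp are placeholders assumed to be set up by
 * the caller.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp);
 *
 *	rte_crypto_op_attach_sym_session(op, sess);
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1);
 *	...
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, &op, 1);
 */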