1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2019 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_mbuf.h> 14 #include <rte_cryptodev.h> 15 #include <rte_malloc.h> 16 #include <rte_memcpy.h> 17 #include <rte_string_fns.h> 18 #include <rte_cycles.h> 19 #include <rte_kvargs.h> 20 #include <rte_dev.h> 21 #include <rte_cryptodev_pmd.h> 22 #include <rte_common.h> 23 #include <rte_fslmc.h> 24 #include <fslmc_vfio.h> 25 #include <dpaa2_hw_pvt.h> 26 #include <dpaa2_hw_dpio.h> 27 #include <dpaa2_hw_mempool.h> 28 #include <fsl_dpopr.h> 29 #include <fsl_dpseci.h> 30 #include <fsl_mc_sys.h> 31 32 #include "dpaa2_sec_priv.h" 33 #include "dpaa2_sec_event.h" 34 #include "dpaa2_sec_logs.h" 35 36 /* RTA header files */ 37 #include <desc/ipsec.h> 38 #include <desc/pdcp.h> 39 #include <desc/algo.h> 40 41 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 42 * a pointer to the shared descriptor 43 */ 44 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 45 #define FSL_VENDOR_ID 0x1957 46 #define FSL_DEVICE_ID 0x410 47 #define FSL_SUBSYSTEM_SEC 1 48 #define FSL_MC_DPSECI_DEVID 3 49 50 #define NO_PREFETCH 0 51 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 52 #define FLE_POOL_NUM_BUFS 32000 53 #define FLE_POOL_BUF_SIZE 256 54 #define FLE_POOL_CACHE_SIZE 512 55 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32)) 56 #define SEC_FLC_DHR_OUTBOUND -114 57 #define SEC_FLC_DHR_INBOUND 0 58 59 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; 60 61 static uint8_t cryptodev_driver_id; 62 63 int dpaa2_logtype_sec; 64 65 #ifdef RTE_LIBRTE_SECURITY 66 static inline int 67 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 68 struct rte_crypto_op *op, 69 struct qbman_fd *fd, uint16_t bpid) 70 { 71 struct rte_crypto_sym_op *sym_op = op->sym; 72 struct ctxt_priv *priv = sess->ctxt; 73 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 74 struct sec_flow_context *flc; 75 struct rte_mbuf *mbuf; 76 uint32_t in_len = 0, out_len = 0; 77 78 if (sym_op->m_dst) 79 mbuf = sym_op->m_dst; 80 else 81 mbuf = sym_op->m_src; 82 83 /* first FLE entry used to store mbuf and session ctxt */ 84 fle = (struct qbman_fle *)rte_malloc(NULL, 85 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 86 RTE_CACHE_LINE_SIZE); 87 if (unlikely(!fle)) { 88 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 89 return -ENOMEM; 90 } 91 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 92 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 93 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 94 95 /* Save the shared descriptor */ 96 flc = &priv->flc_desc[0].flc; 97 98 op_fle = fle + 1; 99 ip_fle = fle + 2; 100 sge = fle + 3; 101 102 if (likely(bpid < MAX_BPID)) { 103 DPAA2_SET_FD_BPID(fd, bpid); 104 DPAA2_SET_FLE_BPID(op_fle, bpid); 105 DPAA2_SET_FLE_BPID(ip_fle, bpid); 106 } else { 107 DPAA2_SET_FD_IVP(fd); 108 DPAA2_SET_FLE_IVP(op_fle); 109 DPAA2_SET_FLE_IVP(ip_fle); 110 } 111 112 /* Configure FD as a FRAME LIST */ 113 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 114 DPAA2_SET_FD_COMPOUND_FMT(fd); 115 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 116 117 /* Configure Output FLE with Scatter/Gather Entry */ 118 DPAA2_SET_FLE_SG_EXT(op_fle); 119 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 120 121 /* Configure Output SGE for Encap/Decap */ 122 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 123 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 124 /* o/p segs */ 125 while (mbuf->next) { 126 sge->length = mbuf->data_len; 127 out_len += sge->length; 128 sge++; 129 mbuf = mbuf->next; 130 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 131 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 132 } 133 /* using buf_len for last buf - so that extra data can be added */ 134 sge->length = mbuf->buf_len - mbuf->data_off; 135 out_len += sge->length; 136 137 DPAA2_SET_FLE_FIN(sge); 138 op_fle->length = out_len; 139 140 sge++; 141 mbuf = sym_op->m_src; 142 143 /* Configure Input FLE with Scatter/Gather Entry */ 144 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 145 DPAA2_SET_FLE_SG_EXT(ip_fle); 146 DPAA2_SET_FLE_FIN(ip_fle); 147 148 /* Configure input SGE for Encap/Decap */ 149 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 150 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 151 sge->length = mbuf->data_len; 152 in_len += sge->length; 153 154 mbuf = mbuf->next; 155 /* i/p segs */ 156 while (mbuf) { 157 sge++; 158 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 159 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 160 sge->length = mbuf->data_len; 161 in_len += sge->length; 162 mbuf = mbuf->next; 163 } 164 ip_fle->length = in_len; 165 DPAA2_SET_FLE_FIN(sge); 166 167 /* In case of PDCP, per packet HFN is stored in 168 * mbuf priv after sym_op. 169 */ 170 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 171 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset); 172 /*enable HFN override override */ 173 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 174 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 175 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 176 } 177 DPAA2_SET_FD_LEN(fd, ip_fle->length); 178 179 return 0; 180 } 181 182 static inline int 183 build_proto_compound_fd(dpaa2_sec_session *sess, 184 struct rte_crypto_op *op, 185 struct qbman_fd *fd, uint16_t bpid) 186 { 187 struct rte_crypto_sym_op *sym_op = op->sym; 188 struct ctxt_priv *priv = sess->ctxt; 189 struct qbman_fle *fle, *ip_fle, *op_fle; 190 struct sec_flow_context *flc; 191 struct rte_mbuf *src_mbuf = sym_op->m_src; 192 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 193 int retval; 194 195 if (!dst_mbuf) 196 dst_mbuf = src_mbuf; 197 198 /* Save the shared descriptor */ 199 flc = &priv->flc_desc[0].flc; 200 201 /* we are using the first FLE entry to store Mbuf */ 202 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 203 if (retval) { 204 DPAA2_SEC_DP_ERR("Memory alloc failed"); 205 return -ENOMEM; 206 } 207 memset(fle, 0, FLE_POOL_BUF_SIZE); 208 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 209 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 210 211 op_fle = fle + 1; 212 ip_fle = fle + 2; 213 214 if (likely(bpid < MAX_BPID)) { 215 DPAA2_SET_FD_BPID(fd, bpid); 216 DPAA2_SET_FLE_BPID(op_fle, bpid); 217 DPAA2_SET_FLE_BPID(ip_fle, bpid); 218 } else { 219 DPAA2_SET_FD_IVP(fd); 220 DPAA2_SET_FLE_IVP(op_fle); 221 DPAA2_SET_FLE_IVP(ip_fle); 222 } 223 224 /* Configure FD as a FRAME LIST */ 225 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 226 DPAA2_SET_FD_COMPOUND_FMT(fd); 227 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 228 229 /* Configure Output FLE with dst mbuf data */ 230 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 231 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 232 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 233 234 /* Configure Input FLE with src mbuf data */ 235 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 236 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 237 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 238 239 DPAA2_SET_FD_LEN(fd, ip_fle->length); 240 DPAA2_SET_FLE_FIN(ip_fle); 241 242 /* In case of PDCP, per packet HFN is stored in 243 * mbuf priv after sym_op. 244 */ 245 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 246 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset); 247 /*enable HFN override override */ 248 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 249 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 250 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 251 } 252 253 return 0; 254 255 } 256 257 static inline int 258 build_proto_fd(dpaa2_sec_session *sess, 259 struct rte_crypto_op *op, 260 struct qbman_fd *fd, uint16_t bpid) 261 { 262 struct rte_crypto_sym_op *sym_op = op->sym; 263 if (sym_op->m_dst) 264 return build_proto_compound_fd(sess, op, fd, bpid); 265 266 struct ctxt_priv *priv = sess->ctxt; 267 struct sec_flow_context *flc; 268 struct rte_mbuf *mbuf = sym_op->m_src; 269 270 if (likely(bpid < MAX_BPID)) 271 DPAA2_SET_FD_BPID(fd, bpid); 272 else 273 DPAA2_SET_FD_IVP(fd); 274 275 /* Save the shared descriptor */ 276 flc = &priv->flc_desc[0].flc; 277 278 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 279 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 280 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 281 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 282 283 /* save physical address of mbuf */ 284 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 285 mbuf->buf_iova = (size_t)op; 286 287 return 0; 288 } 289 #endif 290 291 static inline int 292 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 293 struct rte_crypto_op *op, 294 struct qbman_fd *fd, __rte_unused uint16_t bpid) 295 { 296 struct rte_crypto_sym_op *sym_op = op->sym; 297 struct ctxt_priv *priv = sess->ctxt; 298 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 299 struct sec_flow_context *flc; 300 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 301 int icv_len = sess->digest_length; 302 uint8_t *old_icv; 303 struct rte_mbuf *mbuf; 304 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 305 sess->iv.offset); 306 307 if (sym_op->m_dst) 308 mbuf = sym_op->m_dst; 309 else 310 mbuf = sym_op->m_src; 311 312 /* first FLE entry used to store mbuf and session ctxt */ 313 fle = (struct qbman_fle *)rte_malloc(NULL, 314 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 315 RTE_CACHE_LINE_SIZE); 316 if (unlikely(!fle)) { 317 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 318 return -ENOMEM; 319 } 320 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 321 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 322 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 323 324 op_fle = fle + 1; 325 ip_fle = fle + 2; 326 sge = fle + 3; 327 328 /* Save the shared descriptor */ 329 flc = &priv->flc_desc[0].flc; 330 331 /* Configure FD as a FRAME LIST */ 332 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 333 DPAA2_SET_FD_COMPOUND_FMT(fd); 334 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 335 336 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 337 "iv-len=%d data_off: 0x%x\n", 338 sym_op->aead.data.offset, 339 sym_op->aead.data.length, 340 sess->digest_length, 341 sess->iv.length, 342 sym_op->m_src->data_off); 343 344 /* Configure Output FLE with Scatter/Gather Entry */ 345 DPAA2_SET_FLE_SG_EXT(op_fle); 346 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 347 348 if (auth_only_len) 349 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 350 351 op_fle->length = (sess->dir == DIR_ENC) ? 352 (sym_op->aead.data.length + icv_len) : 353 sym_op->aead.data.length; 354 355 /* Configure Output SGE for Encap/Decap */ 356 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 357 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset); 358 sge->length = mbuf->data_len - sym_op->aead.data.offset; 359 360 mbuf = mbuf->next; 361 /* o/p segs */ 362 while (mbuf) { 363 sge++; 364 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 365 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 366 sge->length = mbuf->data_len; 367 mbuf = mbuf->next; 368 } 369 sge->length -= icv_len; 370 371 if (sess->dir == DIR_ENC) { 372 sge++; 373 DPAA2_SET_FLE_ADDR(sge, 374 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 375 sge->length = icv_len; 376 } 377 DPAA2_SET_FLE_FIN(sge); 378 379 sge++; 380 mbuf = sym_op->m_src; 381 382 /* Configure Input FLE with Scatter/Gather Entry */ 383 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 384 DPAA2_SET_FLE_SG_EXT(ip_fle); 385 DPAA2_SET_FLE_FIN(ip_fle); 386 ip_fle->length = (sess->dir == DIR_ENC) ? 387 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 388 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 389 icv_len); 390 391 /* Configure Input SGE for Encap/Decap */ 392 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 393 sge->length = sess->iv.length; 394 395 sge++; 396 if (auth_only_len) { 397 DPAA2_SET_FLE_ADDR(sge, 398 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 399 sge->length = auth_only_len; 400 sge++; 401 } 402 403 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 404 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 405 mbuf->data_off); 406 sge->length = mbuf->data_len - sym_op->aead.data.offset; 407 408 mbuf = mbuf->next; 409 /* i/p segs */ 410 while (mbuf) { 411 sge++; 412 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 413 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 414 sge->length = mbuf->data_len; 415 mbuf = mbuf->next; 416 } 417 418 if (sess->dir == DIR_DEC) { 419 sge++; 420 old_icv = (uint8_t *)(sge + 1); 421 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 422 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 423 sge->length = icv_len; 424 } 425 426 DPAA2_SET_FLE_FIN(sge); 427 if (auth_only_len) { 428 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 429 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 430 } 431 DPAA2_SET_FD_LEN(fd, ip_fle->length); 432 433 return 0; 434 } 435 436 static inline int 437 build_authenc_gcm_fd(dpaa2_sec_session *sess, 438 struct rte_crypto_op *op, 439 struct qbman_fd *fd, uint16_t bpid) 440 { 441 struct rte_crypto_sym_op *sym_op = op->sym; 442 struct ctxt_priv *priv = sess->ctxt; 443 struct qbman_fle *fle, *sge; 444 struct sec_flow_context *flc; 445 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 446 int icv_len = sess->digest_length, retval; 447 uint8_t *old_icv; 448 struct rte_mbuf *dst; 449 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 450 sess->iv.offset); 451 452 if (sym_op->m_dst) 453 dst = sym_op->m_dst; 454 else 455 dst = sym_op->m_src; 456 457 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 458 * Currently we donot know which FLE has the mbuf stored. 459 * So while retreiving we can go back 1 FLE from the FD -ADDR 460 * to get the MBUF Addr from the previous FLE. 461 * We can have a better approach to use the inline Mbuf 462 */ 463 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 464 if (retval) { 465 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 466 return -ENOMEM; 467 } 468 memset(fle, 0, FLE_POOL_BUF_SIZE); 469 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 470 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 471 fle = fle + 1; 472 sge = fle + 2; 473 if (likely(bpid < MAX_BPID)) { 474 DPAA2_SET_FD_BPID(fd, bpid); 475 DPAA2_SET_FLE_BPID(fle, bpid); 476 DPAA2_SET_FLE_BPID(fle + 1, bpid); 477 DPAA2_SET_FLE_BPID(sge, bpid); 478 DPAA2_SET_FLE_BPID(sge + 1, bpid); 479 DPAA2_SET_FLE_BPID(sge + 2, bpid); 480 DPAA2_SET_FLE_BPID(sge + 3, bpid); 481 } else { 482 DPAA2_SET_FD_IVP(fd); 483 DPAA2_SET_FLE_IVP(fle); 484 DPAA2_SET_FLE_IVP((fle + 1)); 485 DPAA2_SET_FLE_IVP(sge); 486 DPAA2_SET_FLE_IVP((sge + 1)); 487 DPAA2_SET_FLE_IVP((sge + 2)); 488 DPAA2_SET_FLE_IVP((sge + 3)); 489 } 490 491 /* Save the shared descriptor */ 492 flc = &priv->flc_desc[0].flc; 493 /* Configure FD as a FRAME LIST */ 494 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 495 DPAA2_SET_FD_COMPOUND_FMT(fd); 496 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 497 498 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 499 "iv-len=%d data_off: 0x%x\n", 500 sym_op->aead.data.offset, 501 sym_op->aead.data.length, 502 sess->digest_length, 503 sess->iv.length, 504 sym_op->m_src->data_off); 505 506 /* Configure Output FLE with Scatter/Gather Entry */ 507 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 508 if (auth_only_len) 509 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 510 fle->length = (sess->dir == DIR_ENC) ? 511 (sym_op->aead.data.length + icv_len) : 512 sym_op->aead.data.length; 513 514 DPAA2_SET_FLE_SG_EXT(fle); 515 516 /* Configure Output SGE for Encap/Decap */ 517 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 518 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset); 519 sge->length = sym_op->aead.data.length; 520 521 if (sess->dir == DIR_ENC) { 522 sge++; 523 DPAA2_SET_FLE_ADDR(sge, 524 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 525 sge->length = sess->digest_length; 526 } 527 DPAA2_SET_FLE_FIN(sge); 528 529 sge++; 530 fle++; 531 532 /* Configure Input FLE with Scatter/Gather Entry */ 533 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 534 DPAA2_SET_FLE_SG_EXT(fle); 535 DPAA2_SET_FLE_FIN(fle); 536 fle->length = (sess->dir == DIR_ENC) ? 537 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 538 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 539 sess->digest_length); 540 541 /* Configure Input SGE for Encap/Decap */ 542 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 543 sge->length = sess->iv.length; 544 sge++; 545 if (auth_only_len) { 546 DPAA2_SET_FLE_ADDR(sge, 547 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 548 sge->length = auth_only_len; 549 DPAA2_SET_FLE_BPID(sge, bpid); 550 sge++; 551 } 552 553 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 554 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 555 sym_op->m_src->data_off); 556 sge->length = sym_op->aead.data.length; 557 if (sess->dir == DIR_DEC) { 558 sge++; 559 old_icv = (uint8_t *)(sge + 1); 560 memcpy(old_icv, sym_op->aead.digest.data, 561 sess->digest_length); 562 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 563 sge->length = sess->digest_length; 564 } 565 DPAA2_SET_FLE_FIN(sge); 566 567 if (auth_only_len) { 568 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 569 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 570 } 571 572 DPAA2_SET_FD_LEN(fd, fle->length); 573 return 0; 574 } 575 576 static inline int 577 build_authenc_sg_fd(dpaa2_sec_session *sess, 578 struct rte_crypto_op *op, 579 struct qbman_fd *fd, __rte_unused uint16_t bpid) 580 { 581 struct rte_crypto_sym_op *sym_op = op->sym; 582 struct ctxt_priv *priv = sess->ctxt; 583 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 584 struct sec_flow_context *flc; 585 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 586 sym_op->auth.data.offset; 587 uint16_t auth_tail_len = sym_op->auth.data.length - 588 sym_op->cipher.data.length - auth_hdr_len; 589 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 590 int icv_len = sess->digest_length; 591 uint8_t *old_icv; 592 struct rte_mbuf *mbuf; 593 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 594 sess->iv.offset); 595 596 if (sym_op->m_dst) 597 mbuf = sym_op->m_dst; 598 else 599 mbuf = sym_op->m_src; 600 601 /* first FLE entry used to store mbuf and session ctxt */ 602 fle = (struct qbman_fle *)rte_malloc(NULL, 603 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 604 RTE_CACHE_LINE_SIZE); 605 if (unlikely(!fle)) { 606 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 607 return -ENOMEM; 608 } 609 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 610 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 611 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 612 613 op_fle = fle + 1; 614 ip_fle = fle + 2; 615 sge = fle + 3; 616 617 /* Save the shared descriptor */ 618 flc = &priv->flc_desc[0].flc; 619 620 /* Configure FD as a FRAME LIST */ 621 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 622 DPAA2_SET_FD_COMPOUND_FMT(fd); 623 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 624 625 DPAA2_SEC_DP_DEBUG( 626 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 627 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 628 sym_op->auth.data.offset, 629 sym_op->auth.data.length, 630 sess->digest_length, 631 sym_op->cipher.data.offset, 632 sym_op->cipher.data.length, 633 sess->iv.length, 634 sym_op->m_src->data_off); 635 636 /* Configure Output FLE with Scatter/Gather Entry */ 637 DPAA2_SET_FLE_SG_EXT(op_fle); 638 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 639 640 if (auth_only_len) 641 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 642 643 op_fle->length = (sess->dir == DIR_ENC) ? 644 (sym_op->cipher.data.length + icv_len) : 645 sym_op->cipher.data.length; 646 647 /* Configure Output SGE for Encap/Decap */ 648 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 649 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 650 sge->length = mbuf->data_len - sym_op->auth.data.offset; 651 652 mbuf = mbuf->next; 653 /* o/p segs */ 654 while (mbuf) { 655 sge++; 656 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 657 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 658 sge->length = mbuf->data_len; 659 mbuf = mbuf->next; 660 } 661 sge->length -= icv_len; 662 663 if (sess->dir == DIR_ENC) { 664 sge++; 665 DPAA2_SET_FLE_ADDR(sge, 666 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 667 sge->length = icv_len; 668 } 669 DPAA2_SET_FLE_FIN(sge); 670 671 sge++; 672 mbuf = sym_op->m_src; 673 674 /* Configure Input FLE with Scatter/Gather Entry */ 675 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 676 DPAA2_SET_FLE_SG_EXT(ip_fle); 677 DPAA2_SET_FLE_FIN(ip_fle); 678 ip_fle->length = (sess->dir == DIR_ENC) ? 679 (sym_op->auth.data.length + sess->iv.length) : 680 (sym_op->auth.data.length + sess->iv.length + 681 icv_len); 682 683 /* Configure Input SGE for Encap/Decap */ 684 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 685 sge->length = sess->iv.length; 686 687 sge++; 688 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 689 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 690 mbuf->data_off); 691 sge->length = mbuf->data_len - sym_op->auth.data.offset; 692 693 mbuf = mbuf->next; 694 /* i/p segs */ 695 while (mbuf) { 696 sge++; 697 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 698 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 699 sge->length = mbuf->data_len; 700 mbuf = mbuf->next; 701 } 702 sge->length -= icv_len; 703 704 if (sess->dir == DIR_DEC) { 705 sge++; 706 old_icv = (uint8_t *)(sge + 1); 707 memcpy(old_icv, sym_op->auth.digest.data, 708 icv_len); 709 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 710 sge->length = icv_len; 711 } 712 713 DPAA2_SET_FLE_FIN(sge); 714 if (auth_only_len) { 715 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 716 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 717 } 718 DPAA2_SET_FD_LEN(fd, ip_fle->length); 719 720 return 0; 721 } 722 723 static inline int 724 build_authenc_fd(dpaa2_sec_session *sess, 725 struct rte_crypto_op *op, 726 struct qbman_fd *fd, uint16_t bpid) 727 { 728 struct rte_crypto_sym_op *sym_op = op->sym; 729 struct ctxt_priv *priv = sess->ctxt; 730 struct qbman_fle *fle, *sge; 731 struct sec_flow_context *flc; 732 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 733 sym_op->auth.data.offset; 734 uint16_t auth_tail_len = sym_op->auth.data.length - 735 sym_op->cipher.data.length - auth_hdr_len; 736 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 737 738 int icv_len = sess->digest_length, retval; 739 uint8_t *old_icv; 740 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 741 sess->iv.offset); 742 struct rte_mbuf *dst; 743 744 if (sym_op->m_dst) 745 dst = sym_op->m_dst; 746 else 747 dst = sym_op->m_src; 748 749 /* we are using the first FLE entry to store Mbuf. 750 * Currently we donot know which FLE has the mbuf stored. 751 * So while retreiving we can go back 1 FLE from the FD -ADDR 752 * to get the MBUF Addr from the previous FLE. 753 * We can have a better approach to use the inline Mbuf 754 */ 755 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 756 if (retval) { 757 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 758 return -ENOMEM; 759 } 760 memset(fle, 0, FLE_POOL_BUF_SIZE); 761 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 762 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 763 fle = fle + 1; 764 sge = fle + 2; 765 if (likely(bpid < MAX_BPID)) { 766 DPAA2_SET_FD_BPID(fd, bpid); 767 DPAA2_SET_FLE_BPID(fle, bpid); 768 DPAA2_SET_FLE_BPID(fle + 1, bpid); 769 DPAA2_SET_FLE_BPID(sge, bpid); 770 DPAA2_SET_FLE_BPID(sge + 1, bpid); 771 DPAA2_SET_FLE_BPID(sge + 2, bpid); 772 DPAA2_SET_FLE_BPID(sge + 3, bpid); 773 } else { 774 DPAA2_SET_FD_IVP(fd); 775 DPAA2_SET_FLE_IVP(fle); 776 DPAA2_SET_FLE_IVP((fle + 1)); 777 DPAA2_SET_FLE_IVP(sge); 778 DPAA2_SET_FLE_IVP((sge + 1)); 779 DPAA2_SET_FLE_IVP((sge + 2)); 780 DPAA2_SET_FLE_IVP((sge + 3)); 781 } 782 783 /* Save the shared descriptor */ 784 flc = &priv->flc_desc[0].flc; 785 /* Configure FD as a FRAME LIST */ 786 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 787 DPAA2_SET_FD_COMPOUND_FMT(fd); 788 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 789 790 DPAA2_SEC_DP_DEBUG( 791 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 792 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 793 sym_op->auth.data.offset, 794 sym_op->auth.data.length, 795 sess->digest_length, 796 sym_op->cipher.data.offset, 797 sym_op->cipher.data.length, 798 sess->iv.length, 799 sym_op->m_src->data_off); 800 801 /* Configure Output FLE with Scatter/Gather Entry */ 802 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 803 if (auth_only_len) 804 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 805 fle->length = (sess->dir == DIR_ENC) ? 806 (sym_op->cipher.data.length + icv_len) : 807 sym_op->cipher.data.length; 808 809 DPAA2_SET_FLE_SG_EXT(fle); 810 811 /* Configure Output SGE for Encap/Decap */ 812 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 813 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 814 dst->data_off); 815 sge->length = sym_op->cipher.data.length; 816 817 if (sess->dir == DIR_ENC) { 818 sge++; 819 DPAA2_SET_FLE_ADDR(sge, 820 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 821 sge->length = sess->digest_length; 822 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 823 sess->iv.length)); 824 } 825 DPAA2_SET_FLE_FIN(sge); 826 827 sge++; 828 fle++; 829 830 /* Configure Input FLE with Scatter/Gather Entry */ 831 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 832 DPAA2_SET_FLE_SG_EXT(fle); 833 DPAA2_SET_FLE_FIN(fle); 834 fle->length = (sess->dir == DIR_ENC) ? 835 (sym_op->auth.data.length + sess->iv.length) : 836 (sym_op->auth.data.length + sess->iv.length + 837 sess->digest_length); 838 839 /* Configure Input SGE for Encap/Decap */ 840 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 841 sge->length = sess->iv.length; 842 sge++; 843 844 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 845 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 846 sym_op->m_src->data_off); 847 sge->length = sym_op->auth.data.length; 848 if (sess->dir == DIR_DEC) { 849 sge++; 850 old_icv = (uint8_t *)(sge + 1); 851 memcpy(old_icv, sym_op->auth.digest.data, 852 sess->digest_length); 853 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 854 sge->length = sess->digest_length; 855 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 856 sess->digest_length + 857 sess->iv.length)); 858 } 859 DPAA2_SET_FLE_FIN(sge); 860 if (auth_only_len) { 861 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 862 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 863 } 864 return 0; 865 } 866 867 static inline int build_auth_sg_fd( 868 dpaa2_sec_session *sess, 869 struct rte_crypto_op *op, 870 struct qbman_fd *fd, 871 __rte_unused uint16_t bpid) 872 { 873 struct rte_crypto_sym_op *sym_op = op->sym; 874 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 875 struct sec_flow_context *flc; 876 struct ctxt_priv *priv = sess->ctxt; 877 int data_len, data_offset; 878 uint8_t *old_digest; 879 struct rte_mbuf *mbuf; 880 881 data_len = sym_op->auth.data.length; 882 data_offset = sym_op->auth.data.offset; 883 884 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 885 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 886 if ((data_len & 7) || (data_offset & 7)) { 887 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 888 return -ENOTSUP; 889 } 890 891 data_len = data_len >> 3; 892 data_offset = data_offset >> 3; 893 } 894 895 mbuf = sym_op->m_src; 896 fle = (struct qbman_fle *)rte_malloc(NULL, 897 FLE_SG_MEM_SIZE(mbuf->nb_segs), 898 RTE_CACHE_LINE_SIZE); 899 if (unlikely(!fle)) { 900 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 901 return -ENOMEM; 902 } 903 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 904 /* first FLE entry used to store mbuf and session ctxt */ 905 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 906 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 907 op_fle = fle + 1; 908 ip_fle = fle + 2; 909 sge = fle + 3; 910 911 flc = &priv->flc_desc[DESC_INITFINAL].flc; 912 /* sg FD */ 913 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 914 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 915 DPAA2_SET_FD_COMPOUND_FMT(fd); 916 917 /* o/p fle */ 918 DPAA2_SET_FLE_ADDR(op_fle, 919 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 920 op_fle->length = sess->digest_length; 921 922 /* i/p fle */ 923 DPAA2_SET_FLE_SG_EXT(ip_fle); 924 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 925 ip_fle->length = data_len; 926 927 if (sess->iv.length) { 928 uint8_t *iv_ptr; 929 930 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 931 sess->iv.offset); 932 933 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 934 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 935 sge->length = 12; 936 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 937 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 938 sge->length = 8; 939 } else { 940 sge->length = sess->iv.length; 941 } 942 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 943 ip_fle->length += sge->length; 944 sge++; 945 } 946 /* i/p 1st seg */ 947 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 948 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 949 950 if (data_len <= (mbuf->data_len - data_offset)) { 951 sge->length = data_len; 952 data_len = 0; 953 } else { 954 sge->length = mbuf->data_len - data_offset; 955 956 /* remaining i/p segs */ 957 while ((data_len = data_len - sge->length) && 958 (mbuf = mbuf->next)) { 959 sge++; 960 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 961 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 962 if (data_len > mbuf->data_len) 963 sge->length = mbuf->data_len; 964 else 965 sge->length = data_len; 966 } 967 } 968 969 if (sess->dir == DIR_DEC) { 970 /* Digest verification case */ 971 sge++; 972 old_digest = (uint8_t *)(sge + 1); 973 rte_memcpy(old_digest, sym_op->auth.digest.data, 974 sess->digest_length); 975 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 976 sge->length = sess->digest_length; 977 ip_fle->length += sess->digest_length; 978 } 979 DPAA2_SET_FLE_FIN(sge); 980 DPAA2_SET_FLE_FIN(ip_fle); 981 DPAA2_SET_FD_LEN(fd, ip_fle->length); 982 983 return 0; 984 } 985 986 static inline int 987 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 988 struct qbman_fd *fd, uint16_t bpid) 989 { 990 struct rte_crypto_sym_op *sym_op = op->sym; 991 struct qbman_fle *fle, *sge; 992 struct sec_flow_context *flc; 993 struct ctxt_priv *priv = sess->ctxt; 994 int data_len, data_offset; 995 uint8_t *old_digest; 996 int retval; 997 998 data_len = sym_op->auth.data.length; 999 data_offset = sym_op->auth.data.offset; 1000 1001 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 1002 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1003 if ((data_len & 7) || (data_offset & 7)) { 1004 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 1005 return -ENOTSUP; 1006 } 1007 1008 data_len = data_len >> 3; 1009 data_offset = data_offset >> 3; 1010 } 1011 1012 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1013 if (retval) { 1014 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 1015 return -ENOMEM; 1016 } 1017 memset(fle, 0, FLE_POOL_BUF_SIZE); 1018 /* TODO we are using the first FLE entry to store Mbuf. 1019 * Currently we donot know which FLE has the mbuf stored. 1020 * So while retreiving we can go back 1 FLE from the FD -ADDR 1021 * to get the MBUF Addr from the previous FLE. 1022 * We can have a better approach to use the inline Mbuf 1023 */ 1024 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1025 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1026 fle = fle + 1; 1027 sge = fle + 2; 1028 1029 if (likely(bpid < MAX_BPID)) { 1030 DPAA2_SET_FD_BPID(fd, bpid); 1031 DPAA2_SET_FLE_BPID(fle, bpid); 1032 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1033 DPAA2_SET_FLE_BPID(sge, bpid); 1034 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1035 } else { 1036 DPAA2_SET_FD_IVP(fd); 1037 DPAA2_SET_FLE_IVP(fle); 1038 DPAA2_SET_FLE_IVP((fle + 1)); 1039 DPAA2_SET_FLE_IVP(sge); 1040 DPAA2_SET_FLE_IVP((sge + 1)); 1041 } 1042 1043 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1044 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1045 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1046 DPAA2_SET_FD_COMPOUND_FMT(fd); 1047 1048 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1049 fle->length = sess->digest_length; 1050 fle++; 1051 1052 /* Setting input FLE */ 1053 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1054 DPAA2_SET_FLE_SG_EXT(fle); 1055 fle->length = data_len; 1056 1057 if (sess->iv.length) { 1058 uint8_t *iv_ptr; 1059 1060 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1061 sess->iv.offset); 1062 1063 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1064 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1065 sge->length = 12; 1066 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1067 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1068 sge->length = 8; 1069 } else { 1070 sge->length = sess->iv.length; 1071 } 1072 1073 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1074 fle->length = fle->length + sge->length; 1075 sge++; 1076 } 1077 1078 /* Setting data to authenticate */ 1079 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1080 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1081 sge->length = data_len; 1082 1083 if (sess->dir == DIR_DEC) { 1084 sge++; 1085 old_digest = (uint8_t *)(sge + 1); 1086 rte_memcpy(old_digest, sym_op->auth.digest.data, 1087 sess->digest_length); 1088 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1089 sge->length = sess->digest_length; 1090 fle->length = fle->length + sess->digest_length; 1091 } 1092 1093 DPAA2_SET_FLE_FIN(sge); 1094 DPAA2_SET_FLE_FIN(fle); 1095 DPAA2_SET_FD_LEN(fd, fle->length); 1096 1097 return 0; 1098 } 1099 1100 static int 1101 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1102 struct qbman_fd *fd, __rte_unused uint16_t bpid) 1103 { 1104 struct rte_crypto_sym_op *sym_op = op->sym; 1105 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1106 int data_len, data_offset; 1107 struct sec_flow_context *flc; 1108 struct ctxt_priv *priv = sess->ctxt; 1109 struct rte_mbuf *mbuf; 1110 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1111 sess->iv.offset); 1112 1113 data_len = sym_op->cipher.data.length; 1114 data_offset = sym_op->cipher.data.offset; 1115 1116 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1117 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1118 if ((data_len & 7) || (data_offset & 7)) { 1119 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1120 return -ENOTSUP; 1121 } 1122 1123 data_len = data_len >> 3; 1124 data_offset = data_offset >> 3; 1125 } 1126 1127 if (sym_op->m_dst) 1128 mbuf = sym_op->m_dst; 1129 else 1130 mbuf = sym_op->m_src; 1131 1132 /* first FLE entry used to store mbuf and session ctxt */ 1133 fle = (struct qbman_fle *)rte_malloc(NULL, 1134 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1135 RTE_CACHE_LINE_SIZE); 1136 if (!fle) { 1137 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1138 return -ENOMEM; 1139 } 1140 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1141 /* first FLE entry used to store mbuf and session ctxt */ 1142 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1143 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1144 1145 op_fle = fle + 1; 1146 ip_fle = fle + 2; 1147 sge = fle + 3; 1148 1149 flc = &priv->flc_desc[0].flc; 1150 1151 DPAA2_SEC_DP_DEBUG( 1152 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1153 " data_off: 0x%x\n", 1154 data_offset, 1155 data_len, 1156 sess->iv.length, 1157 sym_op->m_src->data_off); 1158 1159 /* o/p fle */ 1160 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1161 op_fle->length = data_len; 1162 DPAA2_SET_FLE_SG_EXT(op_fle); 1163 1164 /* o/p 1st seg */ 1165 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1166 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1167 sge->length = mbuf->data_len - data_offset; 1168 1169 mbuf = mbuf->next; 1170 /* o/p segs */ 1171 while (mbuf) { 1172 sge++; 1173 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1174 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1175 sge->length = mbuf->data_len; 1176 mbuf = mbuf->next; 1177 } 1178 DPAA2_SET_FLE_FIN(sge); 1179 1180 DPAA2_SEC_DP_DEBUG( 1181 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 1182 flc, fle, fle->addr_hi, fle->addr_lo, 1183 fle->length); 1184 1185 /* i/p fle */ 1186 mbuf = sym_op->m_src; 1187 sge++; 1188 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1189 ip_fle->length = sess->iv.length + data_len; 1190 DPAA2_SET_FLE_SG_EXT(ip_fle); 1191 1192 /* i/p IV */ 1193 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1194 DPAA2_SET_FLE_OFFSET(sge, 0); 1195 sge->length = sess->iv.length; 1196 1197 sge++; 1198 1199 /* i/p 1st seg */ 1200 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1201 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1202 sge->length = mbuf->data_len - data_offset; 1203 1204 mbuf = mbuf->next; 1205 /* i/p segs */ 1206 while (mbuf) { 1207 sge++; 1208 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1209 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1210 sge->length = mbuf->data_len; 1211 mbuf = mbuf->next; 1212 } 1213 DPAA2_SET_FLE_FIN(sge); 1214 DPAA2_SET_FLE_FIN(ip_fle); 1215 1216 /* sg fd */ 1217 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1218 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1219 DPAA2_SET_FD_COMPOUND_FMT(fd); 1220 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1221 1222 DPAA2_SEC_DP_DEBUG( 1223 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1224 " off =%d, len =%d\n", 1225 DPAA2_GET_FD_ADDR(fd), 1226 DPAA2_GET_FD_BPID(fd), 1227 rte_dpaa2_bpid_info[bpid].meta_data_size, 1228 DPAA2_GET_FD_OFFSET(fd), 1229 DPAA2_GET_FD_LEN(fd)); 1230 return 0; 1231 } 1232 1233 static int 1234 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1235 struct qbman_fd *fd, uint16_t bpid) 1236 { 1237 struct rte_crypto_sym_op *sym_op = op->sym; 1238 struct qbman_fle *fle, *sge; 1239 int retval, data_len, data_offset; 1240 struct sec_flow_context *flc; 1241 struct ctxt_priv *priv = sess->ctxt; 1242 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1243 sess->iv.offset); 1244 struct rte_mbuf *dst; 1245 1246 data_len = sym_op->cipher.data.length; 1247 data_offset = sym_op->cipher.data.offset; 1248 1249 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1250 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1251 if ((data_len & 7) || (data_offset & 7)) { 1252 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1253 return -ENOTSUP; 1254 } 1255 1256 data_len = data_len >> 3; 1257 data_offset = data_offset >> 3; 1258 } 1259 1260 if (sym_op->m_dst) 1261 dst = sym_op->m_dst; 1262 else 1263 dst = sym_op->m_src; 1264 1265 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1266 if (retval) { 1267 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1268 return -ENOMEM; 1269 } 1270 memset(fle, 0, FLE_POOL_BUF_SIZE); 1271 /* TODO we are using the first FLE entry to store Mbuf. 1272 * Currently we donot know which FLE has the mbuf stored. 1273 * So while retreiving we can go back 1 FLE from the FD -ADDR 1274 * to get the MBUF Addr from the previous FLE. 1275 * We can have a better approach to use the inline Mbuf 1276 */ 1277 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1278 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1279 fle = fle + 1; 1280 sge = fle + 2; 1281 1282 if (likely(bpid < MAX_BPID)) { 1283 DPAA2_SET_FD_BPID(fd, bpid); 1284 DPAA2_SET_FLE_BPID(fle, bpid); 1285 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1286 DPAA2_SET_FLE_BPID(sge, bpid); 1287 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1288 } else { 1289 DPAA2_SET_FD_IVP(fd); 1290 DPAA2_SET_FLE_IVP(fle); 1291 DPAA2_SET_FLE_IVP((fle + 1)); 1292 DPAA2_SET_FLE_IVP(sge); 1293 DPAA2_SET_FLE_IVP((sge + 1)); 1294 } 1295 1296 flc = &priv->flc_desc[0].flc; 1297 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1298 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1299 DPAA2_SET_FD_COMPOUND_FMT(fd); 1300 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1301 1302 DPAA2_SEC_DP_DEBUG( 1303 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1304 " data_off: 0x%x\n", 1305 data_offset, 1306 data_len, 1307 sess->iv.length, 1308 sym_op->m_src->data_off); 1309 1310 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1311 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off); 1312 1313 fle->length = data_len + sess->iv.length; 1314 1315 DPAA2_SEC_DP_DEBUG( 1316 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1317 flc, fle, fle->addr_hi, fle->addr_lo, 1318 fle->length); 1319 1320 fle++; 1321 1322 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1323 fle->length = data_len + sess->iv.length; 1324 1325 DPAA2_SET_FLE_SG_EXT(fle); 1326 1327 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1328 sge->length = sess->iv.length; 1329 1330 sge++; 1331 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1332 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1333 1334 sge->length = data_len; 1335 DPAA2_SET_FLE_FIN(sge); 1336 DPAA2_SET_FLE_FIN(fle); 1337 1338 DPAA2_SEC_DP_DEBUG( 1339 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1340 " off =%d, len =%d\n", 1341 DPAA2_GET_FD_ADDR(fd), 1342 DPAA2_GET_FD_BPID(fd), 1343 rte_dpaa2_bpid_info[bpid].meta_data_size, 1344 DPAA2_GET_FD_OFFSET(fd), 1345 DPAA2_GET_FD_LEN(fd)); 1346 1347 return 0; 1348 } 1349 1350 static inline int 1351 build_sec_fd(struct rte_crypto_op *op, 1352 struct qbman_fd *fd, uint16_t bpid) 1353 { 1354 int ret = -1; 1355 dpaa2_sec_session *sess; 1356 1357 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1358 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1359 op->sym->session, cryptodev_driver_id); 1360 #ifdef RTE_LIBRTE_SECURITY 1361 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1362 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1363 op->sym->sec_session); 1364 #endif 1365 else 1366 return -ENOTSUP; 1367 1368 if (!sess) 1369 return -EINVAL; 1370 1371 /* Any of the buffer is segmented*/ 1372 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1373 ((op->sym->m_dst != NULL) && 1374 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1375 switch (sess->ctxt_type) { 1376 case DPAA2_SEC_CIPHER: 1377 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1378 break; 1379 case DPAA2_SEC_AUTH: 1380 ret = build_auth_sg_fd(sess, op, fd, bpid); 1381 break; 1382 case DPAA2_SEC_AEAD: 1383 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1384 break; 1385 case DPAA2_SEC_CIPHER_HASH: 1386 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1387 break; 1388 #ifdef RTE_LIBRTE_SECURITY 1389 case DPAA2_SEC_IPSEC: 1390 case DPAA2_SEC_PDCP: 1391 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1392 break; 1393 #endif 1394 case DPAA2_SEC_HASH_CIPHER: 1395 default: 1396 DPAA2_SEC_ERR("error: Unsupported session"); 1397 } 1398 } else { 1399 switch (sess->ctxt_type) { 1400 case DPAA2_SEC_CIPHER: 1401 ret = build_cipher_fd(sess, op, fd, bpid); 1402 break; 1403 case DPAA2_SEC_AUTH: 1404 ret = build_auth_fd(sess, op, fd, bpid); 1405 break; 1406 case DPAA2_SEC_AEAD: 1407 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1408 break; 1409 case DPAA2_SEC_CIPHER_HASH: 1410 ret = build_authenc_fd(sess, op, fd, bpid); 1411 break; 1412 #ifdef RTE_LIBRTE_SECURITY 1413 case DPAA2_SEC_IPSEC: 1414 ret = build_proto_fd(sess, op, fd, bpid); 1415 break; 1416 case DPAA2_SEC_PDCP: 1417 ret = build_proto_compound_fd(sess, op, fd, bpid); 1418 break; 1419 #endif 1420 case DPAA2_SEC_HASH_CIPHER: 1421 default: 1422 DPAA2_SEC_ERR("error: Unsupported session"); 1423 ret = -ENOTSUP; 1424 } 1425 } 1426 return ret; 1427 } 1428 1429 static uint16_t 1430 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1431 uint16_t nb_ops) 1432 { 1433 /* Function to transmit the frames to given device and VQ*/ 1434 uint32_t loop; 1435 int32_t ret; 1436 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1437 uint32_t frames_to_send, retry_count; 1438 struct qbman_eq_desc eqdesc; 1439 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1440 struct qbman_swp *swp; 1441 uint16_t num_tx = 0; 1442 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1443 /*todo - need to support multiple buffer pools */ 1444 uint16_t bpid; 1445 struct rte_mempool *mb_pool; 1446 1447 if (unlikely(nb_ops == 0)) 1448 return 0; 1449 1450 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1451 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1452 return 0; 1453 } 1454 /*Prepare enqueue descriptor*/ 1455 qbman_eq_desc_clear(&eqdesc); 1456 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1457 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1458 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1459 1460 if (!DPAA2_PER_LCORE_DPIO) { 1461 ret = dpaa2_affine_qbman_swp(); 1462 if (ret) { 1463 DPAA2_SEC_ERR("Failure in affining portal"); 1464 return 0; 1465 } 1466 } 1467 swp = DPAA2_PER_LCORE_PORTAL; 1468 1469 while (nb_ops) { 1470 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1471 dpaa2_eqcr_size : nb_ops; 1472 1473 for (loop = 0; loop < frames_to_send; loop++) { 1474 if ((*ops)->sym->m_src->seqn) { 1475 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1; 1476 1477 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1478 DPAA2_PER_LCORE_DQRR_SIZE--; 1479 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1480 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN; 1481 } 1482 1483 /*Clear the unused FD fields before sending*/ 1484 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1485 mb_pool = (*ops)->sym->m_src->pool; 1486 bpid = mempool_to_bpid(mb_pool); 1487 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1488 if (ret) { 1489 DPAA2_SEC_ERR("error: Improper packet contents" 1490 " for crypto operation"); 1491 goto skip_tx; 1492 } 1493 ops++; 1494 } 1495 1496 loop = 0; 1497 retry_count = 0; 1498 while (loop < frames_to_send) { 1499 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1500 &fd_arr[loop], 1501 &flags[loop], 1502 frames_to_send - loop); 1503 if (unlikely(ret < 0)) { 1504 retry_count++; 1505 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1506 num_tx += loop; 1507 nb_ops -= loop; 1508 goto skip_tx; 1509 } 1510 } else { 1511 loop += ret; 1512 retry_count = 0; 1513 } 1514 } 1515 1516 num_tx += loop; 1517 nb_ops -= loop; 1518 } 1519 skip_tx: 1520 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1521 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1522 return num_tx; 1523 } 1524 1525 #ifdef RTE_LIBRTE_SECURITY 1526 static inline struct rte_crypto_op * 1527 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1528 { 1529 struct rte_crypto_op *op; 1530 uint16_t len = DPAA2_GET_FD_LEN(fd); 1531 int16_t diff = 0; 1532 dpaa2_sec_session *sess_priv __rte_unused; 1533 1534 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1535 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1536 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1537 1538 diff = len - mbuf->pkt_len; 1539 mbuf->pkt_len += diff; 1540 mbuf->data_len += diff; 1541 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1542 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1543 op->sym->aead.digest.phys_addr = 0L; 1544 1545 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1546 op->sym->sec_session); 1547 if (sess_priv->dir == DIR_ENC) 1548 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1549 else 1550 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1551 1552 return op; 1553 } 1554 #endif 1555 1556 static inline struct rte_crypto_op * 1557 sec_fd_to_mbuf(const struct qbman_fd *fd) 1558 { 1559 struct qbman_fle *fle; 1560 struct rte_crypto_op *op; 1561 struct ctxt_priv *priv; 1562 struct rte_mbuf *dst, *src; 1563 1564 #ifdef RTE_LIBRTE_SECURITY 1565 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1566 return sec_simple_fd_to_mbuf(fd); 1567 #endif 1568 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1569 1570 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1571 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1572 1573 /* we are using the first FLE entry to store Mbuf. 1574 * Currently we donot know which FLE has the mbuf stored. 1575 * So while retreiving we can go back 1 FLE from the FD -ADDR 1576 * to get the MBUF Addr from the previous FLE. 1577 * We can have a better approach to use the inline Mbuf 1578 */ 1579 1580 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1581 /* TODO complete it. */ 1582 DPAA2_SEC_ERR("error: non inline buffer"); 1583 return NULL; 1584 } 1585 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1586 1587 /* Prefeth op */ 1588 src = op->sym->m_src; 1589 rte_prefetch0(src); 1590 1591 if (op->sym->m_dst) { 1592 dst = op->sym->m_dst; 1593 rte_prefetch0(dst); 1594 } else 1595 dst = src; 1596 1597 #ifdef RTE_LIBRTE_SECURITY 1598 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1599 uint16_t len = DPAA2_GET_FD_LEN(fd); 1600 dst->pkt_len = len; 1601 while (dst->next != NULL) { 1602 len -= dst->data_len; 1603 dst = dst->next; 1604 } 1605 dst->data_len = len; 1606 } 1607 #endif 1608 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1609 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1610 (void *)dst, 1611 dst->buf_addr, 1612 DPAA2_GET_FD_ADDR(fd), 1613 DPAA2_GET_FD_BPID(fd), 1614 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1615 DPAA2_GET_FD_OFFSET(fd), 1616 DPAA2_GET_FD_LEN(fd)); 1617 1618 /* free the fle memory */ 1619 if (likely(rte_pktmbuf_is_contiguous(src))) { 1620 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1621 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1622 } else 1623 rte_free((void *)(fle-1)); 1624 1625 return op; 1626 } 1627 1628 static uint16_t 1629 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1630 uint16_t nb_ops) 1631 { 1632 /* Function is responsible to receive frames for a given device and VQ*/ 1633 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1634 struct qbman_result *dq_storage; 1635 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1636 int ret, num_rx = 0; 1637 uint8_t is_last = 0, status; 1638 struct qbman_swp *swp; 1639 const struct qbman_fd *fd; 1640 struct qbman_pull_desc pulldesc; 1641 1642 if (!DPAA2_PER_LCORE_DPIO) { 1643 ret = dpaa2_affine_qbman_swp(); 1644 if (ret) { 1645 DPAA2_SEC_ERR("Failure in affining portal"); 1646 return 0; 1647 } 1648 } 1649 swp = DPAA2_PER_LCORE_PORTAL; 1650 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1651 1652 qbman_pull_desc_clear(&pulldesc); 1653 qbman_pull_desc_set_numframes(&pulldesc, 1654 (nb_ops > dpaa2_dqrr_size) ? 1655 dpaa2_dqrr_size : nb_ops); 1656 qbman_pull_desc_set_fq(&pulldesc, fqid); 1657 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1658 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1659 1); 1660 1661 /*Issue a volatile dequeue command. */ 1662 while (1) { 1663 if (qbman_swp_pull(swp, &pulldesc)) { 1664 DPAA2_SEC_WARN( 1665 "SEC VDQ command is not issued : QBMAN busy"); 1666 /* Portal was busy, try again */ 1667 continue; 1668 } 1669 break; 1670 }; 1671 1672 /* Receive the packets till Last Dequeue entry is found with 1673 * respect to the above issues PULL command. 1674 */ 1675 while (!is_last) { 1676 /* Check if the previous issued command is completed. 1677 * Also seems like the SWP is shared between the Ethernet Driver 1678 * and the SEC driver. 1679 */ 1680 while (!qbman_check_command_complete(dq_storage)) 1681 ; 1682 1683 /* Loop until the dq_storage is updated with 1684 * new token by QBMAN 1685 */ 1686 while (!qbman_check_new_result(dq_storage)) 1687 ; 1688 /* Check whether Last Pull command is Expired and 1689 * setting Condition for Loop termination 1690 */ 1691 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1692 is_last = 1; 1693 /* Check for valid frame. */ 1694 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1695 if (unlikely( 1696 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1697 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1698 continue; 1699 } 1700 } 1701 1702 fd = qbman_result_DQ_fd(dq_storage); 1703 ops[num_rx] = sec_fd_to_mbuf(fd); 1704 1705 if (unlikely(fd->simple.frc)) { 1706 /* TODO Parse SEC errors */ 1707 DPAA2_SEC_ERR("SEC returned Error - %x", 1708 fd->simple.frc); 1709 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1710 } else { 1711 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1712 } 1713 1714 num_rx++; 1715 dq_storage++; 1716 } /* End of Packet Rx loop */ 1717 1718 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1719 1720 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1721 /*Return the total number of packets received to DPAA2 app*/ 1722 return num_rx; 1723 } 1724 1725 /** Release queue pair */ 1726 static int 1727 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1728 { 1729 struct dpaa2_sec_qp *qp = 1730 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1731 1732 PMD_INIT_FUNC_TRACE(); 1733 1734 if (qp->rx_vq.q_storage) { 1735 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1736 rte_free(qp->rx_vq.q_storage); 1737 } 1738 rte_free(qp); 1739 1740 dev->data->queue_pairs[queue_pair_id] = NULL; 1741 1742 return 0; 1743 } 1744 1745 /** Setup a queue pair */ 1746 static int 1747 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1748 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1749 __rte_unused int socket_id) 1750 { 1751 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1752 struct dpaa2_sec_qp *qp; 1753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1754 struct dpseci_rx_queue_cfg cfg; 1755 int32_t retcode; 1756 1757 PMD_INIT_FUNC_TRACE(); 1758 1759 /* If qp is already in use free ring memory and qp metadata. */ 1760 if (dev->data->queue_pairs[qp_id] != NULL) { 1761 DPAA2_SEC_INFO("QP already setup"); 1762 return 0; 1763 } 1764 1765 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1766 dev, qp_id, qp_conf); 1767 1768 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1769 1770 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1771 RTE_CACHE_LINE_SIZE); 1772 if (!qp) { 1773 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1774 return -ENOMEM; 1775 } 1776 1777 qp->rx_vq.crypto_data = dev->data; 1778 qp->tx_vq.crypto_data = dev->data; 1779 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1780 sizeof(struct queue_storage_info_t), 1781 RTE_CACHE_LINE_SIZE); 1782 if (!qp->rx_vq.q_storage) { 1783 DPAA2_SEC_ERR("malloc failed for q_storage"); 1784 return -ENOMEM; 1785 } 1786 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1787 1788 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1789 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1790 return -ENOMEM; 1791 } 1792 1793 dev->data->queue_pairs[qp_id] = qp; 1794 1795 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1796 cfg.user_ctx = (size_t)(&qp->rx_vq); 1797 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1798 qp_id, &cfg); 1799 return retcode; 1800 } 1801 1802 /** Returns the size of the aesni gcm session structure */ 1803 static unsigned int 1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1805 { 1806 PMD_INIT_FUNC_TRACE(); 1807 1808 return sizeof(dpaa2_sec_session); 1809 } 1810 1811 static int 1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1813 struct rte_crypto_sym_xform *xform, 1814 dpaa2_sec_session *session) 1815 { 1816 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1817 struct alginfo cipherdata; 1818 int bufsize, ret = 0; 1819 struct ctxt_priv *priv; 1820 struct sec_flow_context *flc; 1821 1822 PMD_INIT_FUNC_TRACE(); 1823 1824 /* For SEC CIPHER only one descriptor is required. */ 1825 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1826 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1827 RTE_CACHE_LINE_SIZE); 1828 if (priv == NULL) { 1829 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1830 return -ENOMEM; 1831 } 1832 1833 priv->fle_pool = dev_priv->fle_pool; 1834 1835 flc = &priv->flc_desc[0].flc; 1836 1837 session->ctxt_type = DPAA2_SEC_CIPHER; 1838 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1839 RTE_CACHE_LINE_SIZE); 1840 if (session->cipher_key.data == NULL) { 1841 DPAA2_SEC_ERR("No Memory for cipher key"); 1842 rte_free(priv); 1843 return -ENOMEM; 1844 } 1845 session->cipher_key.length = xform->cipher.key.length; 1846 1847 memcpy(session->cipher_key.data, xform->cipher.key.data, 1848 xform->cipher.key.length); 1849 cipherdata.key = (size_t)session->cipher_key.data; 1850 cipherdata.keylen = session->cipher_key.length; 1851 cipherdata.key_enc_flags = 0; 1852 cipherdata.key_type = RTA_DATA_IMM; 1853 1854 /* Set IV parameters */ 1855 session->iv.offset = xform->cipher.iv.offset; 1856 session->iv.length = xform->cipher.iv.length; 1857 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1858 DIR_ENC : DIR_DEC; 1859 1860 switch (xform->cipher.algo) { 1861 case RTE_CRYPTO_CIPHER_AES_CBC: 1862 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1863 cipherdata.algmode = OP_ALG_AAI_CBC; 1864 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1865 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1866 SHR_NEVER, &cipherdata, 1867 session->iv.length, 1868 session->dir); 1869 break; 1870 case RTE_CRYPTO_CIPHER_3DES_CBC: 1871 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1872 cipherdata.algmode = OP_ALG_AAI_CBC; 1873 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1874 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1875 SHR_NEVER, &cipherdata, 1876 session->iv.length, 1877 session->dir); 1878 break; 1879 case RTE_CRYPTO_CIPHER_AES_CTR: 1880 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1881 cipherdata.algmode = OP_ALG_AAI_CTR; 1882 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1883 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1884 SHR_NEVER, &cipherdata, 1885 session->iv.length, 1886 session->dir); 1887 break; 1888 case RTE_CRYPTO_CIPHER_3DES_CTR: 1889 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1890 cipherdata.algmode = OP_ALG_AAI_CTR; 1891 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR; 1892 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1893 SHR_NEVER, &cipherdata, 1894 session->iv.length, 1895 session->dir); 1896 break; 1897 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1898 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1899 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1900 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1901 &cipherdata, 1902 session->dir); 1903 break; 1904 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1905 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1906 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1907 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1908 &cipherdata, 1909 session->dir); 1910 break; 1911 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1912 case RTE_CRYPTO_CIPHER_AES_F8: 1913 case RTE_CRYPTO_CIPHER_AES_ECB: 1914 case RTE_CRYPTO_CIPHER_3DES_ECB: 1915 case RTE_CRYPTO_CIPHER_AES_XTS: 1916 case RTE_CRYPTO_CIPHER_ARC4: 1917 case RTE_CRYPTO_CIPHER_NULL: 1918 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1919 xform->cipher.algo); 1920 ret = -ENOTSUP; 1921 goto error_out; 1922 default: 1923 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1924 xform->cipher.algo); 1925 ret = -ENOTSUP; 1926 goto error_out; 1927 } 1928 1929 if (bufsize < 0) { 1930 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1931 ret = -EINVAL; 1932 goto error_out; 1933 } 1934 1935 flc->word1_sdl = (uint8_t)bufsize; 1936 session->ctxt = priv; 1937 1938 #ifdef CAAM_DESC_DEBUG 1939 int i; 1940 for (i = 0; i < bufsize; i++) 1941 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1942 #endif 1943 return ret; 1944 1945 error_out: 1946 rte_free(session->cipher_key.data); 1947 rte_free(priv); 1948 return ret; 1949 } 1950 1951 static int 1952 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1953 struct rte_crypto_sym_xform *xform, 1954 dpaa2_sec_session *session) 1955 { 1956 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1957 struct alginfo authdata; 1958 int bufsize, ret = 0; 1959 struct ctxt_priv *priv; 1960 struct sec_flow_context *flc; 1961 1962 PMD_INIT_FUNC_TRACE(); 1963 1964 /* For SEC AUTH three descriptors are required for various stages */ 1965 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1966 sizeof(struct ctxt_priv) + 3 * 1967 sizeof(struct sec_flc_desc), 1968 RTE_CACHE_LINE_SIZE); 1969 if (priv == NULL) { 1970 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1971 return -ENOMEM; 1972 } 1973 1974 priv->fle_pool = dev_priv->fle_pool; 1975 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1976 1977 session->ctxt_type = DPAA2_SEC_AUTH; 1978 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1979 RTE_CACHE_LINE_SIZE); 1980 if (session->auth_key.data == NULL) { 1981 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1982 rte_free(priv); 1983 return -ENOMEM; 1984 } 1985 session->auth_key.length = xform->auth.key.length; 1986 1987 memcpy(session->auth_key.data, xform->auth.key.data, 1988 xform->auth.key.length); 1989 authdata.key = (size_t)session->auth_key.data; 1990 authdata.keylen = session->auth_key.length; 1991 authdata.key_enc_flags = 0; 1992 authdata.key_type = RTA_DATA_IMM; 1993 1994 session->digest_length = xform->auth.digest_length; 1995 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1996 DIR_ENC : DIR_DEC; 1997 1998 switch (xform->auth.algo) { 1999 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2000 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2001 authdata.algmode = OP_ALG_AAI_HMAC; 2002 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2003 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2004 1, 0, SHR_NEVER, &authdata, 2005 !session->dir, 2006 session->digest_length); 2007 break; 2008 case RTE_CRYPTO_AUTH_MD5_HMAC: 2009 authdata.algtype = OP_ALG_ALGSEL_MD5; 2010 authdata.algmode = OP_ALG_AAI_HMAC; 2011 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2012 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2013 1, 0, SHR_NEVER, &authdata, 2014 !session->dir, 2015 session->digest_length); 2016 break; 2017 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2018 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2019 authdata.algmode = OP_ALG_AAI_HMAC; 2020 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2021 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2022 1, 0, SHR_NEVER, &authdata, 2023 !session->dir, 2024 session->digest_length); 2025 break; 2026 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2027 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2028 authdata.algmode = OP_ALG_AAI_HMAC; 2029 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2030 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2031 1, 0, SHR_NEVER, &authdata, 2032 !session->dir, 2033 session->digest_length); 2034 break; 2035 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2036 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2037 authdata.algmode = OP_ALG_AAI_HMAC; 2038 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2039 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2040 1, 0, SHR_NEVER, &authdata, 2041 !session->dir, 2042 session->digest_length); 2043 break; 2044 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2045 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2046 authdata.algmode = OP_ALG_AAI_HMAC; 2047 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2048 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2049 1, 0, SHR_NEVER, &authdata, 2050 !session->dir, 2051 session->digest_length); 2052 break; 2053 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2054 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2055 authdata.algmode = OP_ALG_AAI_F9; 2056 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2057 session->iv.offset = xform->auth.iv.offset; 2058 session->iv.length = xform->auth.iv.length; 2059 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2060 1, 0, &authdata, 2061 !session->dir, 2062 session->digest_length); 2063 break; 2064 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2065 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2066 authdata.algmode = OP_ALG_AAI_F9; 2067 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2068 session->iv.offset = xform->auth.iv.offset; 2069 session->iv.length = xform->auth.iv.length; 2070 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2071 1, 0, &authdata, 2072 !session->dir, 2073 session->digest_length); 2074 break; 2075 case RTE_CRYPTO_AUTH_KASUMI_F9: 2076 case RTE_CRYPTO_AUTH_NULL: 2077 case RTE_CRYPTO_AUTH_SHA1: 2078 case RTE_CRYPTO_AUTH_SHA256: 2079 case RTE_CRYPTO_AUTH_SHA512: 2080 case RTE_CRYPTO_AUTH_SHA224: 2081 case RTE_CRYPTO_AUTH_SHA384: 2082 case RTE_CRYPTO_AUTH_MD5: 2083 case RTE_CRYPTO_AUTH_AES_GMAC: 2084 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2085 case RTE_CRYPTO_AUTH_AES_CMAC: 2086 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2087 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 2088 xform->auth.algo); 2089 ret = -ENOTSUP; 2090 goto error_out; 2091 default: 2092 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2093 xform->auth.algo); 2094 ret = -ENOTSUP; 2095 goto error_out; 2096 } 2097 2098 if (bufsize < 0) { 2099 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2100 ret = -EINVAL; 2101 goto error_out; 2102 } 2103 2104 flc->word1_sdl = (uint8_t)bufsize; 2105 session->ctxt = priv; 2106 #ifdef CAAM_DESC_DEBUG 2107 int i; 2108 for (i = 0; i < bufsize; i++) 2109 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2110 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2111 #endif 2112 2113 return ret; 2114 2115 error_out: 2116 rte_free(session->auth_key.data); 2117 rte_free(priv); 2118 return ret; 2119 } 2120 2121 static int 2122 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 2123 struct rte_crypto_sym_xform *xform, 2124 dpaa2_sec_session *session) 2125 { 2126 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2127 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2128 struct alginfo aeaddata; 2129 int bufsize; 2130 struct ctxt_priv *priv; 2131 struct sec_flow_context *flc; 2132 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2133 int err, ret = 0; 2134 2135 PMD_INIT_FUNC_TRACE(); 2136 2137 /* Set IV parameters */ 2138 session->iv.offset = aead_xform->iv.offset; 2139 session->iv.length = aead_xform->iv.length; 2140 session->ctxt_type = DPAA2_SEC_AEAD; 2141 2142 /* For SEC AEAD only one descriptor is required */ 2143 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2144 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2145 RTE_CACHE_LINE_SIZE); 2146 if (priv == NULL) { 2147 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2148 return -ENOMEM; 2149 } 2150 2151 priv->fle_pool = dev_priv->fle_pool; 2152 flc = &priv->flc_desc[0].flc; 2153 2154 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2155 RTE_CACHE_LINE_SIZE); 2156 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2157 DPAA2_SEC_ERR("No Memory for aead key"); 2158 rte_free(priv); 2159 return -ENOMEM; 2160 } 2161 memcpy(session->aead_key.data, aead_xform->key.data, 2162 aead_xform->key.length); 2163 2164 session->digest_length = aead_xform->digest_length; 2165 session->aead_key.length = aead_xform->key.length; 2166 ctxt->auth_only_len = aead_xform->aad_length; 2167 2168 aeaddata.key = (size_t)session->aead_key.data; 2169 aeaddata.keylen = session->aead_key.length; 2170 aeaddata.key_enc_flags = 0; 2171 aeaddata.key_type = RTA_DATA_IMM; 2172 2173 switch (aead_xform->algo) { 2174 case RTE_CRYPTO_AEAD_AES_GCM: 2175 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2176 aeaddata.algmode = OP_ALG_AAI_GCM; 2177 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2178 break; 2179 case RTE_CRYPTO_AEAD_AES_CCM: 2180 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 2181 aead_xform->algo); 2182 ret = -ENOTSUP; 2183 goto error_out; 2184 default: 2185 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2186 aead_xform->algo); 2187 ret = -ENOTSUP; 2188 goto error_out; 2189 } 2190 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2191 DIR_ENC : DIR_DEC; 2192 2193 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2194 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2195 MIN_JOB_DESC_SIZE, 2196 (unsigned int *)priv->flc_desc[0].desc, 2197 &priv->flc_desc[0].desc[1], 1); 2198 2199 if (err < 0) { 2200 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2201 ret = -EINVAL; 2202 goto error_out; 2203 } 2204 if (priv->flc_desc[0].desc[1] & 1) { 2205 aeaddata.key_type = RTA_DATA_IMM; 2206 } else { 2207 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2208 aeaddata.key_type = RTA_DATA_PTR; 2209 } 2210 priv->flc_desc[0].desc[0] = 0; 2211 priv->flc_desc[0].desc[1] = 0; 2212 2213 if (session->dir == DIR_ENC) 2214 bufsize = cnstr_shdsc_gcm_encap( 2215 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2216 &aeaddata, session->iv.length, 2217 session->digest_length); 2218 else 2219 bufsize = cnstr_shdsc_gcm_decap( 2220 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2221 &aeaddata, session->iv.length, 2222 session->digest_length); 2223 if (bufsize < 0) { 2224 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2225 ret = -EINVAL; 2226 goto error_out; 2227 } 2228 2229 flc->word1_sdl = (uint8_t)bufsize; 2230 session->ctxt = priv; 2231 #ifdef CAAM_DESC_DEBUG 2232 int i; 2233 for (i = 0; i < bufsize; i++) 2234 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2235 i, priv->flc_desc[0].desc[i]); 2236 #endif 2237 return ret; 2238 2239 error_out: 2240 rte_free(session->aead_key.data); 2241 rte_free(priv); 2242 return ret; 2243 } 2244 2245 2246 static int 2247 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2248 struct rte_crypto_sym_xform *xform, 2249 dpaa2_sec_session *session) 2250 { 2251 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2252 struct alginfo authdata, cipherdata; 2253 int bufsize; 2254 struct ctxt_priv *priv; 2255 struct sec_flow_context *flc; 2256 struct rte_crypto_cipher_xform *cipher_xform; 2257 struct rte_crypto_auth_xform *auth_xform; 2258 int err, ret = 0; 2259 2260 PMD_INIT_FUNC_TRACE(); 2261 2262 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2263 cipher_xform = &xform->cipher; 2264 auth_xform = &xform->next->auth; 2265 session->ctxt_type = 2266 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2267 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2268 } else { 2269 cipher_xform = &xform->next->cipher; 2270 auth_xform = &xform->auth; 2271 session->ctxt_type = 2272 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2273 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2274 } 2275 2276 /* Set IV parameters */ 2277 session->iv.offset = cipher_xform->iv.offset; 2278 session->iv.length = cipher_xform->iv.length; 2279 2280 /* For SEC AEAD only one descriptor is required */ 2281 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2282 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2283 RTE_CACHE_LINE_SIZE); 2284 if (priv == NULL) { 2285 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2286 return -ENOMEM; 2287 } 2288 2289 priv->fle_pool = dev_priv->fle_pool; 2290 flc = &priv->flc_desc[0].flc; 2291 2292 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2293 RTE_CACHE_LINE_SIZE); 2294 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2295 DPAA2_SEC_ERR("No Memory for cipher key"); 2296 rte_free(priv); 2297 return -ENOMEM; 2298 } 2299 session->cipher_key.length = cipher_xform->key.length; 2300 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2301 RTE_CACHE_LINE_SIZE); 2302 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2303 DPAA2_SEC_ERR("No Memory for auth key"); 2304 rte_free(session->cipher_key.data); 2305 rte_free(priv); 2306 return -ENOMEM; 2307 } 2308 session->auth_key.length = auth_xform->key.length; 2309 memcpy(session->cipher_key.data, cipher_xform->key.data, 2310 cipher_xform->key.length); 2311 memcpy(session->auth_key.data, auth_xform->key.data, 2312 auth_xform->key.length); 2313 2314 authdata.key = (size_t)session->auth_key.data; 2315 authdata.keylen = session->auth_key.length; 2316 authdata.key_enc_flags = 0; 2317 authdata.key_type = RTA_DATA_IMM; 2318 2319 session->digest_length = auth_xform->digest_length; 2320 2321 switch (auth_xform->algo) { 2322 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2323 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2324 authdata.algmode = OP_ALG_AAI_HMAC; 2325 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2326 break; 2327 case RTE_CRYPTO_AUTH_MD5_HMAC: 2328 authdata.algtype = OP_ALG_ALGSEL_MD5; 2329 authdata.algmode = OP_ALG_AAI_HMAC; 2330 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2331 break; 2332 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2333 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2334 authdata.algmode = OP_ALG_AAI_HMAC; 2335 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2336 break; 2337 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2338 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2339 authdata.algmode = OP_ALG_AAI_HMAC; 2340 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2341 break; 2342 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2343 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2344 authdata.algmode = OP_ALG_AAI_HMAC; 2345 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2346 break; 2347 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2348 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2349 authdata.algmode = OP_ALG_AAI_HMAC; 2350 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2351 break; 2352 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2353 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2354 case RTE_CRYPTO_AUTH_NULL: 2355 case RTE_CRYPTO_AUTH_SHA1: 2356 case RTE_CRYPTO_AUTH_SHA256: 2357 case RTE_CRYPTO_AUTH_SHA512: 2358 case RTE_CRYPTO_AUTH_SHA224: 2359 case RTE_CRYPTO_AUTH_SHA384: 2360 case RTE_CRYPTO_AUTH_MD5: 2361 case RTE_CRYPTO_AUTH_AES_GMAC: 2362 case RTE_CRYPTO_AUTH_KASUMI_F9: 2363 case RTE_CRYPTO_AUTH_AES_CMAC: 2364 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2365 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2366 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2367 auth_xform->algo); 2368 ret = -ENOTSUP; 2369 goto error_out; 2370 default: 2371 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2372 auth_xform->algo); 2373 ret = -ENOTSUP; 2374 goto error_out; 2375 } 2376 cipherdata.key = (size_t)session->cipher_key.data; 2377 cipherdata.keylen = session->cipher_key.length; 2378 cipherdata.key_enc_flags = 0; 2379 cipherdata.key_type = RTA_DATA_IMM; 2380 2381 switch (cipher_xform->algo) { 2382 case RTE_CRYPTO_CIPHER_AES_CBC: 2383 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2384 cipherdata.algmode = OP_ALG_AAI_CBC; 2385 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2386 break; 2387 case RTE_CRYPTO_CIPHER_3DES_CBC: 2388 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2389 cipherdata.algmode = OP_ALG_AAI_CBC; 2390 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2391 break; 2392 case RTE_CRYPTO_CIPHER_AES_CTR: 2393 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2394 cipherdata.algmode = OP_ALG_AAI_CTR; 2395 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2396 break; 2397 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2398 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2399 case RTE_CRYPTO_CIPHER_NULL: 2400 case RTE_CRYPTO_CIPHER_3DES_ECB: 2401 case RTE_CRYPTO_CIPHER_AES_ECB: 2402 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2403 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2404 cipher_xform->algo); 2405 ret = -ENOTSUP; 2406 goto error_out; 2407 default: 2408 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2409 cipher_xform->algo); 2410 ret = -ENOTSUP; 2411 goto error_out; 2412 } 2413 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2414 DIR_ENC : DIR_DEC; 2415 2416 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2417 priv->flc_desc[0].desc[1] = authdata.keylen; 2418 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2419 MIN_JOB_DESC_SIZE, 2420 (unsigned int *)priv->flc_desc[0].desc, 2421 &priv->flc_desc[0].desc[2], 2); 2422 2423 if (err < 0) { 2424 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2425 ret = -EINVAL; 2426 goto error_out; 2427 } 2428 if (priv->flc_desc[0].desc[2] & 1) { 2429 cipherdata.key_type = RTA_DATA_IMM; 2430 } else { 2431 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2432 cipherdata.key_type = RTA_DATA_PTR; 2433 } 2434 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2435 authdata.key_type = RTA_DATA_IMM; 2436 } else { 2437 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2438 authdata.key_type = RTA_DATA_PTR; 2439 } 2440 priv->flc_desc[0].desc[0] = 0; 2441 priv->flc_desc[0].desc[1] = 0; 2442 priv->flc_desc[0].desc[2] = 0; 2443 2444 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2445 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2446 0, SHR_SERIAL, 2447 &cipherdata, &authdata, 2448 session->iv.length, 2449 session->digest_length, 2450 session->dir); 2451 if (bufsize < 0) { 2452 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2453 ret = -EINVAL; 2454 goto error_out; 2455 } 2456 } else { 2457 DPAA2_SEC_ERR("Hash before cipher not supported"); 2458 ret = -ENOTSUP; 2459 goto error_out; 2460 } 2461 2462 flc->word1_sdl = (uint8_t)bufsize; 2463 session->ctxt = priv; 2464 #ifdef CAAM_DESC_DEBUG 2465 int i; 2466 for (i = 0; i < bufsize; i++) 2467 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2468 i, priv->flc_desc[0].desc[i]); 2469 #endif 2470 2471 return ret; 2472 2473 error_out: 2474 rte_free(session->cipher_key.data); 2475 rte_free(session->auth_key.data); 2476 rte_free(priv); 2477 return ret; 2478 } 2479 2480 static int 2481 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2482 struct rte_crypto_sym_xform *xform, void *sess) 2483 { 2484 dpaa2_sec_session *session = sess; 2485 int ret; 2486 2487 PMD_INIT_FUNC_TRACE(); 2488 2489 if (unlikely(sess == NULL)) { 2490 DPAA2_SEC_ERR("Invalid session struct"); 2491 return -EINVAL; 2492 } 2493 2494 memset(session, 0, sizeof(dpaa2_sec_session)); 2495 /* Default IV length = 0 */ 2496 session->iv.length = 0; 2497 2498 /* Cipher Only */ 2499 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2500 ret = dpaa2_sec_cipher_init(dev, xform, session); 2501 2502 /* Authentication Only */ 2503 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2504 xform->next == NULL) { 2505 ret = dpaa2_sec_auth_init(dev, xform, session); 2506 2507 /* Cipher then Authenticate */ 2508 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2509 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2510 session->ext_params.aead_ctxt.auth_cipher_text = true; 2511 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2512 ret = dpaa2_sec_auth_init(dev, xform, session); 2513 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2514 ret = dpaa2_sec_cipher_init(dev, xform, session); 2515 else 2516 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2517 /* Authenticate then Cipher */ 2518 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2519 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2520 session->ext_params.aead_ctxt.auth_cipher_text = false; 2521 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2522 ret = dpaa2_sec_cipher_init(dev, xform, session); 2523 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2524 ret = dpaa2_sec_auth_init(dev, xform, session); 2525 else 2526 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2527 /* AEAD operation for AES-GCM kind of Algorithms */ 2528 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2529 xform->next == NULL) { 2530 ret = dpaa2_sec_aead_init(dev, xform, session); 2531 2532 } else { 2533 DPAA2_SEC_ERR("Invalid crypto type"); 2534 return -EINVAL; 2535 } 2536 2537 return ret; 2538 } 2539 2540 #ifdef RTE_LIBRTE_SECURITY 2541 static int 2542 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2543 dpaa2_sec_session *session, 2544 struct alginfo *aeaddata) 2545 { 2546 PMD_INIT_FUNC_TRACE(); 2547 2548 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2549 RTE_CACHE_LINE_SIZE); 2550 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2551 DPAA2_SEC_ERR("No Memory for aead key"); 2552 return -ENOMEM; 2553 } 2554 memcpy(session->aead_key.data, aead_xform->key.data, 2555 aead_xform->key.length); 2556 2557 session->digest_length = aead_xform->digest_length; 2558 session->aead_key.length = aead_xform->key.length; 2559 2560 aeaddata->key = (size_t)session->aead_key.data; 2561 aeaddata->keylen = session->aead_key.length; 2562 aeaddata->key_enc_flags = 0; 2563 aeaddata->key_type = RTA_DATA_IMM; 2564 2565 switch (aead_xform->algo) { 2566 case RTE_CRYPTO_AEAD_AES_GCM: 2567 switch (session->digest_length) { 2568 case 8: 2569 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2570 break; 2571 case 12: 2572 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2573 break; 2574 case 16: 2575 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2576 break; 2577 default: 2578 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2579 session->digest_length); 2580 return -EINVAL; 2581 } 2582 aeaddata->algmode = OP_ALG_AAI_GCM; 2583 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2584 break; 2585 case RTE_CRYPTO_AEAD_AES_CCM: 2586 switch (session->digest_length) { 2587 case 8: 2588 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2589 break; 2590 case 12: 2591 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2592 break; 2593 case 16: 2594 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2595 break; 2596 default: 2597 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2598 session->digest_length); 2599 return -EINVAL; 2600 } 2601 aeaddata->algmode = OP_ALG_AAI_CCM; 2602 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2603 break; 2604 default: 2605 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2606 aead_xform->algo); 2607 return -ENOTSUP; 2608 } 2609 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2610 DIR_ENC : DIR_DEC; 2611 2612 return 0; 2613 } 2614 2615 static int 2616 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2617 struct rte_crypto_auth_xform *auth_xform, 2618 dpaa2_sec_session *session, 2619 struct alginfo *cipherdata, 2620 struct alginfo *authdata) 2621 { 2622 if (cipher_xform) { 2623 session->cipher_key.data = rte_zmalloc(NULL, 2624 cipher_xform->key.length, 2625 RTE_CACHE_LINE_SIZE); 2626 if (session->cipher_key.data == NULL && 2627 cipher_xform->key.length > 0) { 2628 DPAA2_SEC_ERR("No Memory for cipher key"); 2629 return -ENOMEM; 2630 } 2631 2632 session->cipher_key.length = cipher_xform->key.length; 2633 memcpy(session->cipher_key.data, cipher_xform->key.data, 2634 cipher_xform->key.length); 2635 session->cipher_alg = cipher_xform->algo; 2636 } else { 2637 session->cipher_key.data = NULL; 2638 session->cipher_key.length = 0; 2639 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2640 } 2641 2642 if (auth_xform) { 2643 session->auth_key.data = rte_zmalloc(NULL, 2644 auth_xform->key.length, 2645 RTE_CACHE_LINE_SIZE); 2646 if (session->auth_key.data == NULL && 2647 auth_xform->key.length > 0) { 2648 DPAA2_SEC_ERR("No Memory for auth key"); 2649 return -ENOMEM; 2650 } 2651 session->auth_key.length = auth_xform->key.length; 2652 memcpy(session->auth_key.data, auth_xform->key.data, 2653 auth_xform->key.length); 2654 session->auth_alg = auth_xform->algo; 2655 session->digest_length = auth_xform->digest_length; 2656 } else { 2657 session->auth_key.data = NULL; 2658 session->auth_key.length = 0; 2659 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2660 } 2661 2662 authdata->key = (size_t)session->auth_key.data; 2663 authdata->keylen = session->auth_key.length; 2664 authdata->key_enc_flags = 0; 2665 authdata->key_type = RTA_DATA_IMM; 2666 switch (session->auth_alg) { 2667 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2668 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2669 authdata->algmode = OP_ALG_AAI_HMAC; 2670 break; 2671 case RTE_CRYPTO_AUTH_MD5_HMAC: 2672 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2673 authdata->algmode = OP_ALG_AAI_HMAC; 2674 break; 2675 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2676 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2677 authdata->algmode = OP_ALG_AAI_HMAC; 2678 if (session->digest_length != 16) 2679 DPAA2_SEC_WARN( 2680 "+++Using sha256-hmac truncated len is non-standard," 2681 "it will not work with lookaside proto"); 2682 break; 2683 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2684 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2685 authdata->algmode = OP_ALG_AAI_HMAC; 2686 break; 2687 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2688 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2689 authdata->algmode = OP_ALG_AAI_HMAC; 2690 break; 2691 case RTE_CRYPTO_AUTH_AES_CMAC: 2692 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2693 break; 2694 case RTE_CRYPTO_AUTH_NULL: 2695 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2696 break; 2697 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2698 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2699 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2700 case RTE_CRYPTO_AUTH_SHA1: 2701 case RTE_CRYPTO_AUTH_SHA256: 2702 case RTE_CRYPTO_AUTH_SHA512: 2703 case RTE_CRYPTO_AUTH_SHA224: 2704 case RTE_CRYPTO_AUTH_SHA384: 2705 case RTE_CRYPTO_AUTH_MD5: 2706 case RTE_CRYPTO_AUTH_AES_GMAC: 2707 case RTE_CRYPTO_AUTH_KASUMI_F9: 2708 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2709 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2710 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2711 session->auth_alg); 2712 return -ENOTSUP; 2713 default: 2714 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2715 session->auth_alg); 2716 return -ENOTSUP; 2717 } 2718 cipherdata->key = (size_t)session->cipher_key.data; 2719 cipherdata->keylen = session->cipher_key.length; 2720 cipherdata->key_enc_flags = 0; 2721 cipherdata->key_type = RTA_DATA_IMM; 2722 2723 switch (session->cipher_alg) { 2724 case RTE_CRYPTO_CIPHER_AES_CBC: 2725 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2726 cipherdata->algmode = OP_ALG_AAI_CBC; 2727 break; 2728 case RTE_CRYPTO_CIPHER_3DES_CBC: 2729 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2730 cipherdata->algmode = OP_ALG_AAI_CBC; 2731 break; 2732 case RTE_CRYPTO_CIPHER_AES_CTR: 2733 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2734 cipherdata->algmode = OP_ALG_AAI_CTR; 2735 break; 2736 case RTE_CRYPTO_CIPHER_NULL: 2737 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2738 break; 2739 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2740 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2741 case RTE_CRYPTO_CIPHER_3DES_ECB: 2742 case RTE_CRYPTO_CIPHER_AES_ECB: 2743 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2744 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2745 session->cipher_alg); 2746 return -ENOTSUP; 2747 default: 2748 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2749 session->cipher_alg); 2750 return -ENOTSUP; 2751 } 2752 2753 return 0; 2754 } 2755 2756 #ifdef RTE_LIBRTE_SECURITY_TEST 2757 static uint8_t aes_cbc_iv[] = { 2758 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2759 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2760 #endif 2761 2762 static int 2763 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2764 struct rte_security_session_conf *conf, 2765 void *sess) 2766 { 2767 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2768 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2769 struct rte_crypto_auth_xform *auth_xform = NULL; 2770 struct rte_crypto_aead_xform *aead_xform = NULL; 2771 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2772 struct ctxt_priv *priv; 2773 struct alginfo authdata, cipherdata; 2774 int bufsize; 2775 struct sec_flow_context *flc; 2776 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2777 int ret = -1; 2778 2779 PMD_INIT_FUNC_TRACE(); 2780 2781 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2782 sizeof(struct ctxt_priv) + 2783 sizeof(struct sec_flc_desc), 2784 RTE_CACHE_LINE_SIZE); 2785 2786 if (priv == NULL) { 2787 DPAA2_SEC_ERR("No memory for priv CTXT"); 2788 return -ENOMEM; 2789 } 2790 2791 priv->fle_pool = dev_priv->fle_pool; 2792 flc = &priv->flc_desc[0].flc; 2793 2794 memset(session, 0, sizeof(dpaa2_sec_session)); 2795 2796 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2797 cipher_xform = &conf->crypto_xform->cipher; 2798 if (conf->crypto_xform->next) 2799 auth_xform = &conf->crypto_xform->next->auth; 2800 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2801 session, &cipherdata, &authdata); 2802 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2803 auth_xform = &conf->crypto_xform->auth; 2804 if (conf->crypto_xform->next) 2805 cipher_xform = &conf->crypto_xform->next->cipher; 2806 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2807 session, &cipherdata, &authdata); 2808 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2809 aead_xform = &conf->crypto_xform->aead; 2810 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2811 session, &cipherdata); 2812 authdata.keylen = 0; 2813 authdata.algtype = 0; 2814 } else { 2815 DPAA2_SEC_ERR("XFORM not specified"); 2816 ret = -EINVAL; 2817 goto out; 2818 } 2819 if (ret) { 2820 DPAA2_SEC_ERR("Failed to process xform"); 2821 goto out; 2822 } 2823 2824 session->ctxt_type = DPAA2_SEC_IPSEC; 2825 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2826 uint8_t *hdr = NULL; 2827 struct ip ip4_hdr; 2828 struct rte_ipv6_hdr ip6_hdr; 2829 struct ipsec_encap_pdb encap_pdb; 2830 2831 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2832 /* For Sec Proto only one descriptor is required. */ 2833 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2834 2835 /* copy algo specific data to PDB */ 2836 switch (cipherdata.algtype) { 2837 case OP_PCL_IPSEC_AES_CTR: 2838 encap_pdb.ctr.ctr_initial = 0x00000001; 2839 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2840 break; 2841 case OP_PCL_IPSEC_AES_GCM8: 2842 case OP_PCL_IPSEC_AES_GCM12: 2843 case OP_PCL_IPSEC_AES_GCM16: 2844 memcpy(encap_pdb.gcm.salt, 2845 (uint8_t *)&(ipsec_xform->salt), 4); 2846 break; 2847 } 2848 2849 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2850 PDBOPTS_ESP_OIHI_PDB_INL | 2851 PDBOPTS_ESP_IVSRC | 2852 PDBHMO_ESP_ENCAP_DTTL | 2853 PDBHMO_ESP_SNR; 2854 if (ipsec_xform->options.esn) 2855 encap_pdb.options |= PDBOPTS_ESP_ESN; 2856 encap_pdb.spi = ipsec_xform->spi; 2857 session->dir = DIR_ENC; 2858 if (ipsec_xform->tunnel.type == 2859 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2860 encap_pdb.ip_hdr_len = sizeof(struct ip); 2861 ip4_hdr.ip_v = IPVERSION; 2862 ip4_hdr.ip_hl = 5; 2863 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2864 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2865 ip4_hdr.ip_id = 0; 2866 ip4_hdr.ip_off = 0; 2867 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2868 ip4_hdr.ip_p = IPPROTO_ESP; 2869 ip4_hdr.ip_sum = 0; 2870 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2871 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2872 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2873 &ip4_hdr, sizeof(struct ip)); 2874 hdr = (uint8_t *)&ip4_hdr; 2875 } else if (ipsec_xform->tunnel.type == 2876 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2877 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2878 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2879 ((ipsec_xform->tunnel.ipv6.dscp << 2880 RTE_IPV6_HDR_TC_SHIFT) & 2881 RTE_IPV6_HDR_TC_MASK) | 2882 ((ipsec_xform->tunnel.ipv6.flabel << 2883 RTE_IPV6_HDR_FL_SHIFT) & 2884 RTE_IPV6_HDR_FL_MASK)); 2885 /* Payload length will be updated by HW */ 2886 ip6_hdr.payload_len = 0; 2887 ip6_hdr.hop_limits = 2888 ipsec_xform->tunnel.ipv6.hlimit; 2889 ip6_hdr.proto = (ipsec_xform->proto == 2890 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 2891 IPPROTO_ESP : IPPROTO_AH; 2892 memcpy(&ip6_hdr.src_addr, 2893 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2894 memcpy(&ip6_hdr.dst_addr, 2895 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2896 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2897 hdr = (uint8_t *)&ip6_hdr; 2898 } 2899 2900 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2901 1, 0, SHR_SERIAL, &encap_pdb, 2902 hdr, &cipherdata, &authdata); 2903 } else if (ipsec_xform->direction == 2904 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2905 struct ipsec_decap_pdb decap_pdb; 2906 2907 flc->dhr = SEC_FLC_DHR_INBOUND; 2908 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2909 /* copy algo specific data to PDB */ 2910 switch (cipherdata.algtype) { 2911 case OP_PCL_IPSEC_AES_CTR: 2912 decap_pdb.ctr.ctr_initial = 0x00000001; 2913 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2914 break; 2915 case OP_PCL_IPSEC_AES_GCM8: 2916 case OP_PCL_IPSEC_AES_GCM12: 2917 case OP_PCL_IPSEC_AES_GCM16: 2918 memcpy(decap_pdb.gcm.salt, 2919 (uint8_t *)&(ipsec_xform->salt), 4); 2920 break; 2921 } 2922 2923 decap_pdb.options = (ipsec_xform->tunnel.type == 2924 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 2925 sizeof(struct ip) << 16 : 2926 sizeof(struct rte_ipv6_hdr) << 16; 2927 if (ipsec_xform->options.esn) 2928 decap_pdb.options |= PDBOPTS_ESP_ESN; 2929 2930 if (ipsec_xform->replay_win_sz) { 2931 uint32_t win_sz; 2932 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2933 2934 switch (win_sz) { 2935 case 1: 2936 case 2: 2937 case 4: 2938 case 8: 2939 case 16: 2940 case 32: 2941 decap_pdb.options |= PDBOPTS_ESP_ARS32; 2942 break; 2943 case 64: 2944 decap_pdb.options |= PDBOPTS_ESP_ARS64; 2945 break; 2946 default: 2947 decap_pdb.options |= PDBOPTS_ESP_ARS128; 2948 } 2949 } 2950 session->dir = DIR_DEC; 2951 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2952 1, 0, SHR_SERIAL, 2953 &decap_pdb, &cipherdata, &authdata); 2954 } else 2955 goto out; 2956 2957 if (bufsize < 0) { 2958 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2959 goto out; 2960 } 2961 2962 flc->word1_sdl = (uint8_t)bufsize; 2963 2964 /* Enable the stashing control bit */ 2965 DPAA2_SET_FLC_RSC(flc); 2966 flc->word2_rflc_31_0 = lower_32_bits( 2967 (size_t)&(((struct dpaa2_sec_qp *) 2968 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2969 flc->word3_rflc_63_32 = upper_32_bits( 2970 (size_t)&(((struct dpaa2_sec_qp *) 2971 dev->data->queue_pairs[0])->rx_vq)); 2972 2973 /* Set EWS bit i.e. enable write-safe */ 2974 DPAA2_SET_FLC_EWS(flc); 2975 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2976 DPAA2_SET_FLC_REUSE_BS(flc); 2977 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2978 DPAA2_SET_FLC_REUSE_FF(flc); 2979 2980 session->ctxt = priv; 2981 2982 return 0; 2983 out: 2984 rte_free(session->auth_key.data); 2985 rte_free(session->cipher_key.data); 2986 rte_free(priv); 2987 return ret; 2988 } 2989 2990 static int 2991 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2992 struct rte_security_session_conf *conf, 2993 void *sess) 2994 { 2995 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2996 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2997 struct rte_crypto_auth_xform *auth_xform = NULL; 2998 struct rte_crypto_cipher_xform *cipher_xform; 2999 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3000 struct ctxt_priv *priv; 3001 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 3002 struct alginfo authdata, cipherdata; 3003 struct alginfo *p_authdata = NULL; 3004 int bufsize = -1; 3005 struct sec_flow_context *flc; 3006 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3007 int swap = true; 3008 #else 3009 int swap = false; 3010 #endif 3011 3012 PMD_INIT_FUNC_TRACE(); 3013 3014 memset(session, 0, sizeof(dpaa2_sec_session)); 3015 3016 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3017 sizeof(struct ctxt_priv) + 3018 sizeof(struct sec_flc_desc), 3019 RTE_CACHE_LINE_SIZE); 3020 3021 if (priv == NULL) { 3022 DPAA2_SEC_ERR("No memory for priv CTXT"); 3023 return -ENOMEM; 3024 } 3025 3026 priv->fle_pool = dev_priv->fle_pool; 3027 flc = &priv->flc_desc[0].flc; 3028 3029 /* find xfrm types */ 3030 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 3031 cipher_xform = &xform->cipher; 3032 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 3033 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3034 session->ext_params.aead_ctxt.auth_cipher_text = true; 3035 cipher_xform = &xform->cipher; 3036 auth_xform = &xform->next->auth; 3037 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 3038 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3039 session->ext_params.aead_ctxt.auth_cipher_text = false; 3040 cipher_xform = &xform->next->cipher; 3041 auth_xform = &xform->auth; 3042 } else { 3043 DPAA2_SEC_ERR("Invalid crypto type"); 3044 return -EINVAL; 3045 } 3046 3047 session->ctxt_type = DPAA2_SEC_PDCP; 3048 if (cipher_xform) { 3049 session->cipher_key.data = rte_zmalloc(NULL, 3050 cipher_xform->key.length, 3051 RTE_CACHE_LINE_SIZE); 3052 if (session->cipher_key.data == NULL && 3053 cipher_xform->key.length > 0) { 3054 DPAA2_SEC_ERR("No Memory for cipher key"); 3055 rte_free(priv); 3056 return -ENOMEM; 3057 } 3058 session->cipher_key.length = cipher_xform->key.length; 3059 memcpy(session->cipher_key.data, cipher_xform->key.data, 3060 cipher_xform->key.length); 3061 session->dir = 3062 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 3063 DIR_ENC : DIR_DEC; 3064 session->cipher_alg = cipher_xform->algo; 3065 } else { 3066 session->cipher_key.data = NULL; 3067 session->cipher_key.length = 0; 3068 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3069 session->dir = DIR_ENC; 3070 } 3071 3072 session->pdcp.domain = pdcp_xform->domain; 3073 session->pdcp.bearer = pdcp_xform->bearer; 3074 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3075 session->pdcp.sn_size = pdcp_xform->sn_size; 3076 session->pdcp.hfn = pdcp_xform->hfn; 3077 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3078 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3079 /* hfv ovd offset location is stored in iv.offset value*/ 3080 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3081 3082 cipherdata.key = (size_t)session->cipher_key.data; 3083 cipherdata.keylen = session->cipher_key.length; 3084 cipherdata.key_enc_flags = 0; 3085 cipherdata.key_type = RTA_DATA_IMM; 3086 3087 switch (session->cipher_alg) { 3088 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3089 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3090 break; 3091 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3092 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3093 break; 3094 case RTE_CRYPTO_CIPHER_AES_CTR: 3095 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3096 break; 3097 case RTE_CRYPTO_CIPHER_NULL: 3098 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3099 break; 3100 default: 3101 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3102 session->cipher_alg); 3103 goto out; 3104 } 3105 3106 if (auth_xform) { 3107 session->auth_key.data = rte_zmalloc(NULL, 3108 auth_xform->key.length, 3109 RTE_CACHE_LINE_SIZE); 3110 if (!session->auth_key.data && 3111 auth_xform->key.length > 0) { 3112 DPAA2_SEC_ERR("No Memory for auth key"); 3113 rte_free(session->cipher_key.data); 3114 rte_free(priv); 3115 return -ENOMEM; 3116 } 3117 session->auth_key.length = auth_xform->key.length; 3118 memcpy(session->auth_key.data, auth_xform->key.data, 3119 auth_xform->key.length); 3120 session->auth_alg = auth_xform->algo; 3121 } else { 3122 session->auth_key.data = NULL; 3123 session->auth_key.length = 0; 3124 session->auth_alg = 0; 3125 } 3126 authdata.key = (size_t)session->auth_key.data; 3127 authdata.keylen = session->auth_key.length; 3128 authdata.key_enc_flags = 0; 3129 authdata.key_type = RTA_DATA_IMM; 3130 3131 if (session->auth_alg) { 3132 switch (session->auth_alg) { 3133 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3134 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3135 break; 3136 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3137 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3138 break; 3139 case RTE_CRYPTO_AUTH_AES_CMAC: 3140 authdata.algtype = PDCP_AUTH_TYPE_AES; 3141 break; 3142 case RTE_CRYPTO_AUTH_NULL: 3143 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3144 break; 3145 default: 3146 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3147 session->auth_alg); 3148 goto out; 3149 } 3150 3151 p_authdata = &authdata; 3152 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3153 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane"); 3154 goto out; 3155 } 3156 3157 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3158 if (session->dir == DIR_ENC) 3159 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3160 priv->flc_desc[0].desc, 1, swap, 3161 pdcp_xform->hfn, 3162 session->pdcp.sn_size, 3163 pdcp_xform->bearer, 3164 pdcp_xform->pkt_dir, 3165 pdcp_xform->hfn_threshold, 3166 &cipherdata, &authdata, 3167 0); 3168 else if (session->dir == DIR_DEC) 3169 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3170 priv->flc_desc[0].desc, 1, swap, 3171 pdcp_xform->hfn, 3172 session->pdcp.sn_size, 3173 pdcp_xform->bearer, 3174 pdcp_xform->pkt_dir, 3175 pdcp_xform->hfn_threshold, 3176 &cipherdata, &authdata, 3177 0); 3178 } else { 3179 if (session->dir == DIR_ENC) 3180 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3181 priv->flc_desc[0].desc, 1, swap, 3182 session->pdcp.sn_size, 3183 pdcp_xform->hfn, 3184 pdcp_xform->bearer, 3185 pdcp_xform->pkt_dir, 3186 pdcp_xform->hfn_threshold, 3187 &cipherdata, p_authdata, 0); 3188 else if (session->dir == DIR_DEC) 3189 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3190 priv->flc_desc[0].desc, 1, swap, 3191 session->pdcp.sn_size, 3192 pdcp_xform->hfn, 3193 pdcp_xform->bearer, 3194 pdcp_xform->pkt_dir, 3195 pdcp_xform->hfn_threshold, 3196 &cipherdata, p_authdata, 0); 3197 } 3198 3199 if (bufsize < 0) { 3200 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3201 goto out; 3202 } 3203 3204 /* Enable the stashing control bit */ 3205 DPAA2_SET_FLC_RSC(flc); 3206 flc->word2_rflc_31_0 = lower_32_bits( 3207 (size_t)&(((struct dpaa2_sec_qp *) 3208 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3209 flc->word3_rflc_63_32 = upper_32_bits( 3210 (size_t)&(((struct dpaa2_sec_qp *) 3211 dev->data->queue_pairs[0])->rx_vq)); 3212 3213 flc->word1_sdl = (uint8_t)bufsize; 3214 3215 /* TODO - check the perf impact or 3216 * align as per descriptor type 3217 * Set EWS bit i.e. enable write-safe 3218 * DPAA2_SET_FLC_EWS(flc); 3219 */ 3220 3221 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3222 DPAA2_SET_FLC_REUSE_BS(flc); 3223 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3224 DPAA2_SET_FLC_REUSE_FF(flc); 3225 3226 session->ctxt = priv; 3227 3228 return 0; 3229 out: 3230 rte_free(session->auth_key.data); 3231 rte_free(session->cipher_key.data); 3232 rte_free(priv); 3233 return -EINVAL; 3234 } 3235 3236 static int 3237 dpaa2_sec_security_session_create(void *dev, 3238 struct rte_security_session_conf *conf, 3239 struct rte_security_session *sess, 3240 struct rte_mempool *mempool) 3241 { 3242 void *sess_private_data; 3243 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3244 int ret; 3245 3246 if (rte_mempool_get(mempool, &sess_private_data)) { 3247 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3248 return -ENOMEM; 3249 } 3250 3251 switch (conf->protocol) { 3252 case RTE_SECURITY_PROTOCOL_IPSEC: 3253 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3254 sess_private_data); 3255 break; 3256 case RTE_SECURITY_PROTOCOL_MACSEC: 3257 return -ENOTSUP; 3258 case RTE_SECURITY_PROTOCOL_PDCP: 3259 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3260 sess_private_data); 3261 break; 3262 default: 3263 return -EINVAL; 3264 } 3265 if (ret != 0) { 3266 DPAA2_SEC_ERR("Failed to configure session parameters"); 3267 /* Return session to mempool */ 3268 rte_mempool_put(mempool, sess_private_data); 3269 return ret; 3270 } 3271 3272 set_sec_session_private_data(sess, sess_private_data); 3273 3274 return ret; 3275 } 3276 3277 /** Clear the memory of session so it doesn't leave key material behind */ 3278 static int 3279 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3280 struct rte_security_session *sess) 3281 { 3282 PMD_INIT_FUNC_TRACE(); 3283 void *sess_priv = get_sec_session_private_data(sess); 3284 3285 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3286 3287 if (sess_priv) { 3288 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3289 3290 rte_free(s->ctxt); 3291 rte_free(s->cipher_key.data); 3292 rte_free(s->auth_key.data); 3293 memset(s, 0, sizeof(dpaa2_sec_session)); 3294 set_sec_session_private_data(sess, NULL); 3295 rte_mempool_put(sess_mp, sess_priv); 3296 } 3297 return 0; 3298 } 3299 #endif 3300 static int 3301 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3302 struct rte_crypto_sym_xform *xform, 3303 struct rte_cryptodev_sym_session *sess, 3304 struct rte_mempool *mempool) 3305 { 3306 void *sess_private_data; 3307 int ret; 3308 3309 if (rte_mempool_get(mempool, &sess_private_data)) { 3310 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3311 return -ENOMEM; 3312 } 3313 3314 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3315 if (ret != 0) { 3316 DPAA2_SEC_ERR("Failed to configure session parameters"); 3317 /* Return session to mempool */ 3318 rte_mempool_put(mempool, sess_private_data); 3319 return ret; 3320 } 3321 3322 set_sym_session_private_data(sess, dev->driver_id, 3323 sess_private_data); 3324 3325 return 0; 3326 } 3327 3328 /** Clear the memory of session so it doesn't leave key material behind */ 3329 static void 3330 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3331 struct rte_cryptodev_sym_session *sess) 3332 { 3333 PMD_INIT_FUNC_TRACE(); 3334 uint8_t index = dev->driver_id; 3335 void *sess_priv = get_sym_session_private_data(sess, index); 3336 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3337 3338 if (sess_priv) { 3339 rte_free(s->ctxt); 3340 rte_free(s->cipher_key.data); 3341 rte_free(s->auth_key.data); 3342 memset(s, 0, sizeof(dpaa2_sec_session)); 3343 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3344 set_sym_session_private_data(sess, index, NULL); 3345 rte_mempool_put(sess_mp, sess_priv); 3346 } 3347 } 3348 3349 static int 3350 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3351 struct rte_cryptodev_config *config __rte_unused) 3352 { 3353 PMD_INIT_FUNC_TRACE(); 3354 3355 return 0; 3356 } 3357 3358 static int 3359 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3360 { 3361 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3362 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3363 struct dpseci_attr attr; 3364 struct dpaa2_queue *dpaa2_q; 3365 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3366 dev->data->queue_pairs; 3367 struct dpseci_rx_queue_attr rx_attr; 3368 struct dpseci_tx_queue_attr tx_attr; 3369 int ret, i; 3370 3371 PMD_INIT_FUNC_TRACE(); 3372 3373 memset(&attr, 0, sizeof(struct dpseci_attr)); 3374 3375 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3376 if (ret) { 3377 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3378 priv->hw_id); 3379 goto get_attr_failure; 3380 } 3381 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3382 if (ret) { 3383 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3384 goto get_attr_failure; 3385 } 3386 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3387 dpaa2_q = &qp[i]->rx_vq; 3388 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3389 &rx_attr); 3390 dpaa2_q->fqid = rx_attr.fqid; 3391 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3392 } 3393 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3394 dpaa2_q = &qp[i]->tx_vq; 3395 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3396 &tx_attr); 3397 dpaa2_q->fqid = tx_attr.fqid; 3398 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3399 } 3400 3401 return 0; 3402 get_attr_failure: 3403 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3404 return -1; 3405 } 3406 3407 static void 3408 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3409 { 3410 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3411 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3412 int ret; 3413 3414 PMD_INIT_FUNC_TRACE(); 3415 3416 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3417 if (ret) { 3418 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3419 priv->hw_id); 3420 return; 3421 } 3422 3423 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3424 if (ret < 0) { 3425 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3426 return; 3427 } 3428 } 3429 3430 static int 3431 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 3432 { 3433 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3434 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3435 int ret; 3436 3437 PMD_INIT_FUNC_TRACE(); 3438 3439 /* Function is reverse of dpaa2_sec_dev_init. 3440 * It does the following: 3441 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3442 * 2. Close the DPSECI device 3443 * 3. Free the allocated resources. 3444 */ 3445 3446 /*Close the device at underlying layer*/ 3447 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3448 if (ret) { 3449 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3450 return -1; 3451 } 3452 3453 /*Free the allocated memory for ethernet private data and dpseci*/ 3454 priv->hw = NULL; 3455 rte_free(dpseci); 3456 3457 return 0; 3458 } 3459 3460 static void 3461 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3462 struct rte_cryptodev_info *info) 3463 { 3464 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3465 3466 PMD_INIT_FUNC_TRACE(); 3467 if (info != NULL) { 3468 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3469 info->feature_flags = dev->feature_flags; 3470 info->capabilities = dpaa2_sec_capabilities; 3471 /* No limit of number of sessions */ 3472 info->sym.max_nb_sessions = 0; 3473 info->driver_id = cryptodev_driver_id; 3474 } 3475 } 3476 3477 static 3478 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3479 struct rte_cryptodev_stats *stats) 3480 { 3481 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3482 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3483 struct dpseci_sec_counters counters = {0}; 3484 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3485 dev->data->queue_pairs; 3486 int ret, i; 3487 3488 PMD_INIT_FUNC_TRACE(); 3489 if (stats == NULL) { 3490 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3491 return; 3492 } 3493 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3494 if (qp[i] == NULL) { 3495 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3496 continue; 3497 } 3498 3499 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3500 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3501 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3502 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3503 } 3504 3505 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 3506 &counters); 3507 if (ret) { 3508 DPAA2_SEC_ERR("SEC counters failed"); 3509 } else { 3510 DPAA2_SEC_INFO("dpseci hardware stats:" 3511 "\n\tNum of Requests Dequeued = %" PRIu64 3512 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3513 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3514 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3515 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3516 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3517 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3518 counters.dequeued_requests, 3519 counters.ob_enc_requests, 3520 counters.ib_dec_requests, 3521 counters.ob_enc_bytes, 3522 counters.ob_prot_bytes, 3523 counters.ib_dec_bytes, 3524 counters.ib_valid_bytes); 3525 } 3526 } 3527 3528 static 3529 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3530 { 3531 int i; 3532 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3533 (dev->data->queue_pairs); 3534 3535 PMD_INIT_FUNC_TRACE(); 3536 3537 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3538 if (qp[i] == NULL) { 3539 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3540 continue; 3541 } 3542 qp[i]->tx_vq.rx_pkts = 0; 3543 qp[i]->tx_vq.tx_pkts = 0; 3544 qp[i]->tx_vq.err_pkts = 0; 3545 qp[i]->rx_vq.rx_pkts = 0; 3546 qp[i]->rx_vq.tx_pkts = 0; 3547 qp[i]->rx_vq.err_pkts = 0; 3548 } 3549 } 3550 3551 static void __rte_hot 3552 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3553 const struct qbman_fd *fd, 3554 const struct qbman_result *dq, 3555 struct dpaa2_queue *rxq, 3556 struct rte_event *ev) 3557 { 3558 /* Prefetching mbuf */ 3559 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3560 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3561 3562 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3563 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3564 3565 ev->flow_id = rxq->ev.flow_id; 3566 ev->sub_event_type = rxq->ev.sub_event_type; 3567 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3568 ev->op = RTE_EVENT_OP_NEW; 3569 ev->sched_type = rxq->ev.sched_type; 3570 ev->queue_id = rxq->ev.queue_id; 3571 ev->priority = rxq->ev.priority; 3572 ev->event_ptr = sec_fd_to_mbuf(fd); 3573 3574 qbman_swp_dqrr_consume(swp, dq); 3575 } 3576 static void 3577 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 3578 const struct qbman_fd *fd, 3579 const struct qbman_result *dq, 3580 struct dpaa2_queue *rxq, 3581 struct rte_event *ev) 3582 { 3583 uint8_t dqrr_index; 3584 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3585 /* Prefetching mbuf */ 3586 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3587 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3588 3589 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3590 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3591 3592 ev->flow_id = rxq->ev.flow_id; 3593 ev->sub_event_type = rxq->ev.sub_event_type; 3594 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3595 ev->op = RTE_EVENT_OP_NEW; 3596 ev->sched_type = rxq->ev.sched_type; 3597 ev->queue_id = rxq->ev.queue_id; 3598 ev->priority = rxq->ev.priority; 3599 3600 ev->event_ptr = sec_fd_to_mbuf(fd); 3601 dqrr_index = qbman_get_dqrr_idx(dq); 3602 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3603 DPAA2_PER_LCORE_DQRR_SIZE++; 3604 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3605 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3606 } 3607 3608 int 3609 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3610 int qp_id, 3611 struct dpaa2_dpcon_dev *dpcon, 3612 const struct rte_event *event) 3613 { 3614 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3615 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3616 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3617 struct dpseci_rx_queue_cfg cfg; 3618 uint8_t priority; 3619 int ret; 3620 3621 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3622 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3623 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3624 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3625 else 3626 return -EINVAL; 3627 3628 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) * 3629 (dpcon->num_priorities - 1); 3630 3631 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3632 cfg.options = DPSECI_QUEUE_OPT_DEST; 3633 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3634 cfg.dest_cfg.dest_id = dpcon->dpcon_id; 3635 cfg.dest_cfg.priority = priority; 3636 3637 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3638 cfg.user_ctx = (size_t)(qp); 3639 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3640 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3641 cfg.order_preservation_en = 1; 3642 } 3643 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3644 qp_id, &cfg); 3645 if (ret) { 3646 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3647 return ret; 3648 } 3649 3650 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3651 3652 return 0; 3653 } 3654 3655 int 3656 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3657 int qp_id) 3658 { 3659 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3660 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3661 struct dpseci_rx_queue_cfg cfg; 3662 int ret; 3663 3664 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3665 cfg.options = DPSECI_QUEUE_OPT_DEST; 3666 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3667 3668 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3669 qp_id, &cfg); 3670 if (ret) 3671 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3672 3673 return ret; 3674 } 3675 3676 static struct rte_cryptodev_ops crypto_ops = { 3677 .dev_configure = dpaa2_sec_dev_configure, 3678 .dev_start = dpaa2_sec_dev_start, 3679 .dev_stop = dpaa2_sec_dev_stop, 3680 .dev_close = dpaa2_sec_dev_close, 3681 .dev_infos_get = dpaa2_sec_dev_infos_get, 3682 .stats_get = dpaa2_sec_stats_get, 3683 .stats_reset = dpaa2_sec_stats_reset, 3684 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3685 .queue_pair_release = dpaa2_sec_queue_pair_release, 3686 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3687 .sym_session_configure = dpaa2_sec_sym_session_configure, 3688 .sym_session_clear = dpaa2_sec_sym_session_clear, 3689 }; 3690 3691 #ifdef RTE_LIBRTE_SECURITY 3692 static const struct rte_security_capability * 3693 dpaa2_sec_capabilities_get(void *device __rte_unused) 3694 { 3695 return dpaa2_sec_security_cap; 3696 } 3697 3698 static const struct rte_security_ops dpaa2_sec_security_ops = { 3699 .session_create = dpaa2_sec_security_session_create, 3700 .session_update = NULL, 3701 .session_stats_get = NULL, 3702 .session_destroy = dpaa2_sec_security_session_destroy, 3703 .set_pkt_metadata = NULL, 3704 .capabilities_get = dpaa2_sec_capabilities_get 3705 }; 3706 #endif 3707 3708 static int 3709 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3710 { 3711 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3712 3713 rte_free(dev->security_ctx); 3714 3715 rte_mempool_free(internals->fle_pool); 3716 3717 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3718 dev->data->name, rte_socket_id()); 3719 3720 return 0; 3721 } 3722 3723 static int 3724 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3725 { 3726 struct dpaa2_sec_dev_private *internals; 3727 struct rte_device *dev = cryptodev->device; 3728 struct rte_dpaa2_device *dpaa2_dev; 3729 #ifdef RTE_LIBRTE_SECURITY 3730 struct rte_security_ctx *security_instance; 3731 #endif 3732 struct fsl_mc_io *dpseci; 3733 uint16_t token; 3734 struct dpseci_attr attr; 3735 int retcode, hw_id; 3736 char str[30]; 3737 3738 PMD_INIT_FUNC_TRACE(); 3739 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3740 hw_id = dpaa2_dev->object_id; 3741 3742 cryptodev->driver_id = cryptodev_driver_id; 3743 cryptodev->dev_ops = &crypto_ops; 3744 3745 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 3746 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 3747 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3748 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3749 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3750 RTE_CRYPTODEV_FF_SECURITY | 3751 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3752 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3753 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3754 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3755 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3756 3757 internals = cryptodev->data->dev_private; 3758 3759 /* 3760 * For secondary processes, we don't initialise any further as primary 3761 * has already done this work. Only check we don't need a different 3762 * RX function 3763 */ 3764 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3765 DPAA2_SEC_DEBUG("Device already init by primary process"); 3766 return 0; 3767 } 3768 #ifdef RTE_LIBRTE_SECURITY 3769 /* Initialize security_ctx only for primary process*/ 3770 security_instance = rte_malloc("rte_security_instances_ops", 3771 sizeof(struct rte_security_ctx), 0); 3772 if (security_instance == NULL) 3773 return -ENOMEM; 3774 security_instance->device = (void *)cryptodev; 3775 security_instance->ops = &dpaa2_sec_security_ops; 3776 security_instance->sess_cnt = 0; 3777 cryptodev->security_ctx = security_instance; 3778 #endif 3779 /*Open the rte device via MC and save the handle for further use*/ 3780 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 3781 sizeof(struct fsl_mc_io), 0); 3782 if (!dpseci) { 3783 DPAA2_SEC_ERR( 3784 "Error in allocating the memory for dpsec object"); 3785 return -ENOMEM; 3786 } 3787 dpseci->regs = rte_mcp_ptr_list[0]; 3788 3789 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 3790 if (retcode != 0) { 3791 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 3792 retcode); 3793 goto init_error; 3794 } 3795 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 3796 if (retcode != 0) { 3797 DPAA2_SEC_ERR( 3798 "Cannot get dpsec device attributed: Error = %x", 3799 retcode); 3800 goto init_error; 3801 } 3802 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 3803 "dpsec-%u", hw_id); 3804 3805 internals->max_nb_queue_pairs = attr.num_tx_queues; 3806 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 3807 internals->hw = dpseci; 3808 internals->token = token; 3809 3810 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d", 3811 getpid(), cryptodev->data->dev_id); 3812 internals->fle_pool = rte_mempool_create((const char *)str, 3813 FLE_POOL_NUM_BUFS, 3814 FLE_POOL_BUF_SIZE, 3815 FLE_POOL_CACHE_SIZE, 0, 3816 NULL, NULL, NULL, NULL, 3817 SOCKET_ID_ANY, 0); 3818 if (!internals->fle_pool) { 3819 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 3820 goto init_error; 3821 } 3822 3823 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3824 return 0; 3825 3826 init_error: 3827 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3828 3829 /* dpaa2_sec_uninit(crypto_dev_name); */ 3830 return -EFAULT; 3831 } 3832 3833 static int 3834 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 3835 struct rte_dpaa2_device *dpaa2_dev) 3836 { 3837 struct rte_cryptodev *cryptodev; 3838 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3839 3840 int retval; 3841 3842 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 3843 dpaa2_dev->object_id); 3844 3845 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3846 if (cryptodev == NULL) 3847 return -ENOMEM; 3848 3849 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3850 cryptodev->data->dev_private = rte_zmalloc_socket( 3851 "cryptodev private structure", 3852 sizeof(struct dpaa2_sec_dev_private), 3853 RTE_CACHE_LINE_SIZE, 3854 rte_socket_id()); 3855 3856 if (cryptodev->data->dev_private == NULL) 3857 rte_panic("Cannot allocate memzone for private " 3858 "device data"); 3859 } 3860 3861 dpaa2_dev->cryptodev = cryptodev; 3862 cryptodev->device = &dpaa2_dev->device; 3863 3864 /* init user callbacks */ 3865 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3866 3867 if (dpaa2_svr_family == SVR_LX2160A) 3868 rta_set_sec_era(RTA_SEC_ERA_10); 3869 3870 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era()); 3871 3872 /* Invoke PMD device initialization function */ 3873 retval = dpaa2_sec_dev_init(cryptodev); 3874 if (retval == 0) 3875 return 0; 3876 3877 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3878 rte_free(cryptodev->data->dev_private); 3879 3880 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 3881 3882 return -ENXIO; 3883 } 3884 3885 static int 3886 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 3887 { 3888 struct rte_cryptodev *cryptodev; 3889 int ret; 3890 3891 cryptodev = dpaa2_dev->cryptodev; 3892 if (cryptodev == NULL) 3893 return -ENODEV; 3894 3895 ret = dpaa2_sec_uninit(cryptodev); 3896 if (ret) 3897 return ret; 3898 3899 return rte_cryptodev_pmd_destroy(cryptodev); 3900 } 3901 3902 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 3903 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 3904 .drv_type = DPAA2_CRYPTO, 3905 .driver = { 3906 .name = "DPAA2 SEC PMD" 3907 }, 3908 .probe = cryptodev_dpaa2_sec_probe, 3909 .remove = cryptodev_dpaa2_sec_remove, 3910 }; 3911 3912 static struct cryptodev_driver dpaa2_sec_crypto_drv; 3913 3914 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 3915 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 3916 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 3917 3918 RTE_INIT(dpaa2_sec_init_log) 3919 { 3920 /* Bus level logs */ 3921 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); 3922 if (dpaa2_logtype_sec >= 0) 3923 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); 3924 } 3925