1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2020 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_ip.h> 13 #include <rte_mbuf.h> 14 #include <rte_cryptodev.h> 15 #include <rte_malloc.h> 16 #include <rte_memcpy.h> 17 #include <rte_string_fns.h> 18 #include <rte_cycles.h> 19 #include <rte_kvargs.h> 20 #include <rte_dev.h> 21 #include <rte_cryptodev_pmd.h> 22 #include <rte_common.h> 23 #include <rte_fslmc.h> 24 #include <fslmc_vfio.h> 25 #include <dpaa2_hw_pvt.h> 26 #include <dpaa2_hw_dpio.h> 27 #include <dpaa2_hw_mempool.h> 28 #include <fsl_dpopr.h> 29 #include <fsl_dpseci.h> 30 #include <fsl_mc_sys.h> 31 32 #include "dpaa2_sec_priv.h" 33 #include "dpaa2_sec_event.h" 34 #include "dpaa2_sec_logs.h" 35 36 /* RTA header files */ 37 #include <desc/ipsec.h> 38 #include <desc/pdcp.h> 39 #include <desc/algo.h> 40 41 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 42 * a pointer to the shared descriptor 43 */ 44 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 45 #define FSL_VENDOR_ID 0x1957 46 #define FSL_DEVICE_ID 0x410 47 #define FSL_SUBSYSTEM_SEC 1 48 #define FSL_MC_DPSECI_DEVID 3 49 50 #define NO_PREFETCH 0 51 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 52 #define FLE_POOL_NUM_BUFS 32000 53 #define FLE_POOL_BUF_SIZE 256 54 #define FLE_POOL_CACHE_SIZE 512 55 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32)) 56 #define SEC_FLC_DHR_OUTBOUND -114 57 #define SEC_FLC_DHR_INBOUND 0 58 59 static uint8_t cryptodev_driver_id; 60 61 #ifdef RTE_LIBRTE_SECURITY 62 static inline int 63 build_proto_compound_sg_fd(dpaa2_sec_session *sess, 64 struct rte_crypto_op *op, 65 struct qbman_fd *fd, uint16_t bpid) 66 { 67 struct rte_crypto_sym_op *sym_op = op->sym; 68 struct ctxt_priv *priv = sess->ctxt; 69 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 70 struct sec_flow_context *flc; 71 struct rte_mbuf *mbuf; 72 uint32_t in_len = 0, out_len = 0; 73 74 if (sym_op->m_dst) 75 mbuf = sym_op->m_dst; 76 else 77 mbuf = sym_op->m_src; 78 79 /* first FLE entry used to store mbuf and session ctxt */ 80 fle = (struct qbman_fle *)rte_malloc(NULL, 81 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 82 RTE_CACHE_LINE_SIZE); 83 if (unlikely(!fle)) { 84 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE"); 85 return -ENOMEM; 86 } 87 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 88 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 89 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 90 91 /* Save the shared descriptor */ 92 flc = &priv->flc_desc[0].flc; 93 94 op_fle = fle + 1; 95 ip_fle = fle + 2; 96 sge = fle + 3; 97 98 if (likely(bpid < MAX_BPID)) { 99 DPAA2_SET_FD_BPID(fd, bpid); 100 DPAA2_SET_FLE_BPID(op_fle, bpid); 101 DPAA2_SET_FLE_BPID(ip_fle, bpid); 102 } else { 103 DPAA2_SET_FD_IVP(fd); 104 DPAA2_SET_FLE_IVP(op_fle); 105 DPAA2_SET_FLE_IVP(ip_fle); 106 } 107 108 /* Configure FD as a FRAME LIST */ 109 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 110 DPAA2_SET_FD_COMPOUND_FMT(fd); 111 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 112 113 /* Configure Output FLE with Scatter/Gather Entry */ 114 DPAA2_SET_FLE_SG_EXT(op_fle); 115 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 116 117 /* Configure Output SGE for Encap/Decap */ 118 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 119 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 120 /* o/p segs */ 121 while (mbuf->next) { 122 
sge->length = mbuf->data_len; 123 out_len += sge->length; 124 sge++; 125 mbuf = mbuf->next; 126 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 127 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 128 } 129 /* using buf_len for last buf - so that extra data can be added */ 130 sge->length = mbuf->buf_len - mbuf->data_off; 131 out_len += sge->length; 132 133 DPAA2_SET_FLE_FIN(sge); 134 op_fle->length = out_len; 135 136 sge++; 137 mbuf = sym_op->m_src; 138 139 /* Configure Input FLE with Scatter/Gather Entry */ 140 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 141 DPAA2_SET_FLE_SG_EXT(ip_fle); 142 DPAA2_SET_FLE_FIN(ip_fle); 143 144 /* Configure input SGE for Encap/Decap */ 145 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 146 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 147 sge->length = mbuf->data_len; 148 in_len += sge->length; 149 150 mbuf = mbuf->next; 151 /* i/p segs */ 152 while (mbuf) { 153 sge++; 154 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 155 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 156 sge->length = mbuf->data_len; 157 in_len += sge->length; 158 mbuf = mbuf->next; 159 } 160 ip_fle->length = in_len; 161 DPAA2_SET_FLE_FIN(sge); 162 163 /* In case of PDCP, per packet HFN is stored in 164 * mbuf priv after sym_op. 165 */ 166 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 167 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 168 sess->pdcp.hfn_ovd_offset); 169 /*enable HFN override override */ 170 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 171 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 172 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 173 } 174 DPAA2_SET_FD_LEN(fd, ip_fle->length); 175 176 return 0; 177 } 178 179 static inline int 180 build_proto_compound_fd(dpaa2_sec_session *sess, 181 struct rte_crypto_op *op, 182 struct qbman_fd *fd, uint16_t bpid) 183 { 184 struct rte_crypto_sym_op *sym_op = op->sym; 185 struct ctxt_priv *priv = sess->ctxt; 186 struct qbman_fle *fle, *ip_fle, *op_fle; 187 struct sec_flow_context *flc; 188 struct rte_mbuf *src_mbuf = sym_op->m_src; 189 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 190 int retval; 191 192 if (!dst_mbuf) 193 dst_mbuf = src_mbuf; 194 195 /* Save the shared descriptor */ 196 flc = &priv->flc_desc[0].flc; 197 198 /* we are using the first FLE entry to store Mbuf */ 199 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 200 if (retval) { 201 DPAA2_SEC_DP_ERR("Memory alloc failed"); 202 return -ENOMEM; 203 } 204 memset(fle, 0, FLE_POOL_BUF_SIZE); 205 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 206 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 207 208 op_fle = fle + 1; 209 ip_fle = fle + 2; 210 211 if (likely(bpid < MAX_BPID)) { 212 DPAA2_SET_FD_BPID(fd, bpid); 213 DPAA2_SET_FLE_BPID(op_fle, bpid); 214 DPAA2_SET_FLE_BPID(ip_fle, bpid); 215 } else { 216 DPAA2_SET_FD_IVP(fd); 217 DPAA2_SET_FLE_IVP(op_fle); 218 DPAA2_SET_FLE_IVP(ip_fle); 219 } 220 221 /* Configure FD as a FRAME LIST */ 222 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 223 DPAA2_SET_FD_COMPOUND_FMT(fd); 224 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 225 226 /* Configure Output FLE with dst mbuf data */ 227 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 228 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 229 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 230 231 /* Configure Input FLE with src mbuf data */ 232 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 233 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 234 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 235 236 DPAA2_SET_FD_LEN(fd, 
ip_fle->length); 237 DPAA2_SET_FLE_FIN(ip_fle); 238 239 /* In case of PDCP, per packet HFN is stored in 240 * mbuf priv after sym_op. 241 */ 242 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 243 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op + 244 sess->pdcp.hfn_ovd_offset); 245 /*enable HFN override override */ 246 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd); 247 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd); 248 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd); 249 } 250 251 return 0; 252 253 } 254 255 static inline int 256 build_proto_fd(dpaa2_sec_session *sess, 257 struct rte_crypto_op *op, 258 struct qbman_fd *fd, uint16_t bpid) 259 { 260 struct rte_crypto_sym_op *sym_op = op->sym; 261 if (sym_op->m_dst) 262 return build_proto_compound_fd(sess, op, fd, bpid); 263 264 struct ctxt_priv *priv = sess->ctxt; 265 struct sec_flow_context *flc; 266 struct rte_mbuf *mbuf = sym_op->m_src; 267 268 if (likely(bpid < MAX_BPID)) 269 DPAA2_SET_FD_BPID(fd, bpid); 270 else 271 DPAA2_SET_FD_IVP(fd); 272 273 /* Save the shared descriptor */ 274 flc = &priv->flc_desc[0].flc; 275 276 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 277 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 278 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 279 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 280 281 /* save physical address of mbuf */ 282 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 283 mbuf->buf_iova = (size_t)op; 284 285 return 0; 286 } 287 #endif 288 289 static inline int 290 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 291 struct rte_crypto_op *op, 292 struct qbman_fd *fd, __rte_unused uint16_t bpid) 293 { 294 struct rte_crypto_sym_op *sym_op = op->sym; 295 struct ctxt_priv *priv = sess->ctxt; 296 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 297 struct sec_flow_context *flc; 298 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 299 int icv_len = sess->digest_length; 300 uint8_t *old_icv; 301 struct rte_mbuf *mbuf; 302 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 303 sess->iv.offset); 304 305 if (sym_op->m_dst) 306 mbuf = sym_op->m_dst; 307 else 308 mbuf = sym_op->m_src; 309 310 /* first FLE entry used to store mbuf and session ctxt */ 311 fle = (struct qbman_fle *)rte_malloc(NULL, 312 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 313 RTE_CACHE_LINE_SIZE); 314 if (unlikely(!fle)) { 315 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 316 return -ENOMEM; 317 } 318 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 319 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 320 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 321 322 op_fle = fle + 1; 323 ip_fle = fle + 2; 324 sge = fle + 3; 325 326 /* Save the shared descriptor */ 327 flc = &priv->flc_desc[0].flc; 328 329 /* Configure FD as a FRAME LIST */ 330 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 331 DPAA2_SET_FD_COMPOUND_FMT(fd); 332 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 333 334 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 335 "iv-len=%d data_off: 0x%x\n", 336 sym_op->aead.data.offset, 337 sym_op->aead.data.length, 338 sess->digest_length, 339 sess->iv.length, 340 sym_op->m_src->data_off); 341 342 /* Configure Output FLE with Scatter/Gather Entry */ 343 DPAA2_SET_FLE_SG_EXT(op_fle); 344 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 345 346 if (auth_only_len) 347 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 348 349 op_fle->length = (sess->dir == DIR_ENC) ? 
350 (sym_op->aead.data.length + icv_len) : 351 sym_op->aead.data.length; 352 353 /* Configure Output SGE for Encap/Decap */ 354 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 355 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset); 356 sge->length = mbuf->data_len - sym_op->aead.data.offset; 357 358 mbuf = mbuf->next; 359 /* o/p segs */ 360 while (mbuf) { 361 sge++; 362 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 363 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 364 sge->length = mbuf->data_len; 365 mbuf = mbuf->next; 366 } 367 sge->length -= icv_len; 368 369 if (sess->dir == DIR_ENC) { 370 sge++; 371 DPAA2_SET_FLE_ADDR(sge, 372 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 373 sge->length = icv_len; 374 } 375 DPAA2_SET_FLE_FIN(sge); 376 377 sge++; 378 mbuf = sym_op->m_src; 379 380 /* Configure Input FLE with Scatter/Gather Entry */ 381 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 382 DPAA2_SET_FLE_SG_EXT(ip_fle); 383 DPAA2_SET_FLE_FIN(ip_fle); 384 ip_fle->length = (sess->dir == DIR_ENC) ? 385 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 386 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 387 icv_len); 388 389 /* Configure Input SGE for Encap/Decap */ 390 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 391 sge->length = sess->iv.length; 392 393 sge++; 394 if (auth_only_len) { 395 DPAA2_SET_FLE_ADDR(sge, 396 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 397 sge->length = auth_only_len; 398 sge++; 399 } 400 401 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 402 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 403 mbuf->data_off); 404 sge->length = mbuf->data_len - sym_op->aead.data.offset; 405 406 mbuf = mbuf->next; 407 /* i/p segs */ 408 while (mbuf) { 409 sge++; 410 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 411 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 412 sge->length = mbuf->data_len; 413 mbuf = mbuf->next; 414 } 415 416 if (sess->dir == DIR_DEC) { 417 sge++; 418 old_icv = (uint8_t *)(sge + 1); 419 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 420 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 421 sge->length = icv_len; 422 } 423 424 DPAA2_SET_FLE_FIN(sge); 425 if (auth_only_len) { 426 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 427 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 428 } 429 DPAA2_SET_FD_LEN(fd, ip_fle->length); 430 431 return 0; 432 } 433 434 static inline int 435 build_authenc_gcm_fd(dpaa2_sec_session *sess, 436 struct rte_crypto_op *op, 437 struct qbman_fd *fd, uint16_t bpid) 438 { 439 struct rte_crypto_sym_op *sym_op = op->sym; 440 struct ctxt_priv *priv = sess->ctxt; 441 struct qbman_fle *fle, *sge; 442 struct sec_flow_context *flc; 443 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 444 int icv_len = sess->digest_length, retval; 445 uint8_t *old_icv; 446 struct rte_mbuf *dst; 447 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 448 sess->iv.offset); 449 450 if (sym_op->m_dst) 451 dst = sym_op->m_dst; 452 else 453 dst = sym_op->m_src; 454 455 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 456 * Currently we donot know which FLE has the mbuf stored. 457 * So while retreiving we can go back 1 FLE from the FD -ADDR 458 * to get the MBUF Addr from the previous FLE. 
459 * We can have a better approach to use the inline Mbuf 460 */ 461 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 462 if (retval) { 463 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 464 return -ENOMEM; 465 } 466 memset(fle, 0, FLE_POOL_BUF_SIZE); 467 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 468 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 469 fle = fle + 1; 470 sge = fle + 2; 471 if (likely(bpid < MAX_BPID)) { 472 DPAA2_SET_FD_BPID(fd, bpid); 473 DPAA2_SET_FLE_BPID(fle, bpid); 474 DPAA2_SET_FLE_BPID(fle + 1, bpid); 475 DPAA2_SET_FLE_BPID(sge, bpid); 476 DPAA2_SET_FLE_BPID(sge + 1, bpid); 477 DPAA2_SET_FLE_BPID(sge + 2, bpid); 478 DPAA2_SET_FLE_BPID(sge + 3, bpid); 479 } else { 480 DPAA2_SET_FD_IVP(fd); 481 DPAA2_SET_FLE_IVP(fle); 482 DPAA2_SET_FLE_IVP((fle + 1)); 483 DPAA2_SET_FLE_IVP(sge); 484 DPAA2_SET_FLE_IVP((sge + 1)); 485 DPAA2_SET_FLE_IVP((sge + 2)); 486 DPAA2_SET_FLE_IVP((sge + 3)); 487 } 488 489 /* Save the shared descriptor */ 490 flc = &priv->flc_desc[0].flc; 491 /* Configure FD as a FRAME LIST */ 492 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 493 DPAA2_SET_FD_COMPOUND_FMT(fd); 494 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 495 496 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 497 "iv-len=%d data_off: 0x%x\n", 498 sym_op->aead.data.offset, 499 sym_op->aead.data.length, 500 sess->digest_length, 501 sess->iv.length, 502 sym_op->m_src->data_off); 503 504 /* Configure Output FLE with Scatter/Gather Entry */ 505 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 506 if (auth_only_len) 507 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 508 fle->length = (sess->dir == DIR_ENC) ? 509 (sym_op->aead.data.length + icv_len) : 510 sym_op->aead.data.length; 511 512 DPAA2_SET_FLE_SG_EXT(fle); 513 514 /* Configure Output SGE for Encap/Decap */ 515 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 516 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset); 517 sge->length = sym_op->aead.data.length; 518 519 if (sess->dir == DIR_ENC) { 520 sge++; 521 DPAA2_SET_FLE_ADDR(sge, 522 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 523 sge->length = sess->digest_length; 524 } 525 DPAA2_SET_FLE_FIN(sge); 526 527 sge++; 528 fle++; 529 530 /* Configure Input FLE with Scatter/Gather Entry */ 531 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 532 DPAA2_SET_FLE_SG_EXT(fle); 533 DPAA2_SET_FLE_FIN(fle); 534 fle->length = (sess->dir == DIR_ENC) ? 
535 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 536 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 537 sess->digest_length); 538 539 /* Configure Input SGE for Encap/Decap */ 540 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 541 sge->length = sess->iv.length; 542 sge++; 543 if (auth_only_len) { 544 DPAA2_SET_FLE_ADDR(sge, 545 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 546 sge->length = auth_only_len; 547 DPAA2_SET_FLE_BPID(sge, bpid); 548 sge++; 549 } 550 551 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 552 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 553 sym_op->m_src->data_off); 554 sge->length = sym_op->aead.data.length; 555 if (sess->dir == DIR_DEC) { 556 sge++; 557 old_icv = (uint8_t *)(sge + 1); 558 memcpy(old_icv, sym_op->aead.digest.data, 559 sess->digest_length); 560 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 561 sge->length = sess->digest_length; 562 } 563 DPAA2_SET_FLE_FIN(sge); 564 565 if (auth_only_len) { 566 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 567 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 568 } 569 570 DPAA2_SET_FD_LEN(fd, fle->length); 571 return 0; 572 } 573 574 static inline int 575 build_authenc_sg_fd(dpaa2_sec_session *sess, 576 struct rte_crypto_op *op, 577 struct qbman_fd *fd, __rte_unused uint16_t bpid) 578 { 579 struct rte_crypto_sym_op *sym_op = op->sym; 580 struct ctxt_priv *priv = sess->ctxt; 581 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 582 struct sec_flow_context *flc; 583 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 584 sym_op->auth.data.offset; 585 uint16_t auth_tail_len = sym_op->auth.data.length - 586 sym_op->cipher.data.length - auth_hdr_len; 587 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 588 int icv_len = sess->digest_length; 589 uint8_t *old_icv; 590 struct rte_mbuf *mbuf; 591 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 592 sess->iv.offset); 593 594 if (sym_op->m_dst) 595 mbuf = sym_op->m_dst; 596 else 597 mbuf = sym_op->m_src; 598 599 /* first FLE entry used to store mbuf and session ctxt */ 600 fle = (struct qbman_fle *)rte_malloc(NULL, 601 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 602 RTE_CACHE_LINE_SIZE); 603 if (unlikely(!fle)) { 604 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 605 return -ENOMEM; 606 } 607 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 608 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 609 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 610 611 op_fle = fle + 1; 612 ip_fle = fle + 2; 613 sge = fle + 3; 614 615 /* Save the shared descriptor */ 616 flc = &priv->flc_desc[0].flc; 617 618 /* Configure FD as a FRAME LIST */ 619 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 620 DPAA2_SET_FD_COMPOUND_FMT(fd); 621 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 622 623 DPAA2_SEC_DP_DEBUG( 624 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 625 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 626 sym_op->auth.data.offset, 627 sym_op->auth.data.length, 628 sess->digest_length, 629 sym_op->cipher.data.offset, 630 sym_op->cipher.data.length, 631 sess->iv.length, 632 sym_op->m_src->data_off); 633 634 /* Configure Output FLE with Scatter/Gather Entry */ 635 DPAA2_SET_FLE_SG_EXT(op_fle); 636 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 637 638 if (auth_only_len) 639 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 640 641 op_fle->length = (sess->dir == DIR_ENC) ? 
642 (sym_op->cipher.data.length + icv_len) : 643 sym_op->cipher.data.length; 644 645 /* Configure Output SGE for Encap/Decap */ 646 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 647 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 648 sge->length = mbuf->data_len - sym_op->auth.data.offset; 649 650 mbuf = mbuf->next; 651 /* o/p segs */ 652 while (mbuf) { 653 sge++; 654 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 655 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 656 sge->length = mbuf->data_len; 657 mbuf = mbuf->next; 658 } 659 sge->length -= icv_len; 660 661 if (sess->dir == DIR_ENC) { 662 sge++; 663 DPAA2_SET_FLE_ADDR(sge, 664 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 665 sge->length = icv_len; 666 } 667 DPAA2_SET_FLE_FIN(sge); 668 669 sge++; 670 mbuf = sym_op->m_src; 671 672 /* Configure Input FLE with Scatter/Gather Entry */ 673 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 674 DPAA2_SET_FLE_SG_EXT(ip_fle); 675 DPAA2_SET_FLE_FIN(ip_fle); 676 ip_fle->length = (sess->dir == DIR_ENC) ? 677 (sym_op->auth.data.length + sess->iv.length) : 678 (sym_op->auth.data.length + sess->iv.length + 679 icv_len); 680 681 /* Configure Input SGE for Encap/Decap */ 682 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 683 sge->length = sess->iv.length; 684 685 sge++; 686 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 687 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 688 mbuf->data_off); 689 sge->length = mbuf->data_len - sym_op->auth.data.offset; 690 691 mbuf = mbuf->next; 692 /* i/p segs */ 693 while (mbuf) { 694 sge++; 695 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 696 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 697 sge->length = mbuf->data_len; 698 mbuf = mbuf->next; 699 } 700 sge->length -= icv_len; 701 702 if (sess->dir == DIR_DEC) { 703 sge++; 704 old_icv = (uint8_t *)(sge + 1); 705 memcpy(old_icv, sym_op->auth.digest.data, 706 icv_len); 707 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 708 sge->length = icv_len; 709 } 710 711 DPAA2_SET_FLE_FIN(sge); 712 if (auth_only_len) { 713 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 714 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 715 } 716 DPAA2_SET_FD_LEN(fd, ip_fle->length); 717 718 return 0; 719 } 720 721 static inline int 722 build_authenc_fd(dpaa2_sec_session *sess, 723 struct rte_crypto_op *op, 724 struct qbman_fd *fd, uint16_t bpid) 725 { 726 struct rte_crypto_sym_op *sym_op = op->sym; 727 struct ctxt_priv *priv = sess->ctxt; 728 struct qbman_fle *fle, *sge; 729 struct sec_flow_context *flc; 730 uint16_t auth_hdr_len = sym_op->cipher.data.offset - 731 sym_op->auth.data.offset; 732 uint16_t auth_tail_len = sym_op->auth.data.length - 733 sym_op->cipher.data.length - auth_hdr_len; 734 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len; 735 736 int icv_len = sess->digest_length, retval; 737 uint8_t *old_icv; 738 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 739 sess->iv.offset); 740 struct rte_mbuf *dst; 741 742 if (sym_op->m_dst) 743 dst = sym_op->m_dst; 744 else 745 dst = sym_op->m_src; 746 747 /* we are using the first FLE entry to store Mbuf. 748 * Currently we donot know which FLE has the mbuf stored. 749 * So while retreiving we can go back 1 FLE from the FD -ADDR 750 * to get the MBUF Addr from the previous FLE. 
751 * We can have a better approach to use the inline Mbuf 752 */ 753 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 754 if (retval) { 755 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 756 return -ENOMEM; 757 } 758 memset(fle, 0, FLE_POOL_BUF_SIZE); 759 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 760 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 761 fle = fle + 1; 762 sge = fle + 2; 763 if (likely(bpid < MAX_BPID)) { 764 DPAA2_SET_FD_BPID(fd, bpid); 765 DPAA2_SET_FLE_BPID(fle, bpid); 766 DPAA2_SET_FLE_BPID(fle + 1, bpid); 767 DPAA2_SET_FLE_BPID(sge, bpid); 768 DPAA2_SET_FLE_BPID(sge + 1, bpid); 769 DPAA2_SET_FLE_BPID(sge + 2, bpid); 770 DPAA2_SET_FLE_BPID(sge + 3, bpid); 771 } else { 772 DPAA2_SET_FD_IVP(fd); 773 DPAA2_SET_FLE_IVP(fle); 774 DPAA2_SET_FLE_IVP((fle + 1)); 775 DPAA2_SET_FLE_IVP(sge); 776 DPAA2_SET_FLE_IVP((sge + 1)); 777 DPAA2_SET_FLE_IVP((sge + 2)); 778 DPAA2_SET_FLE_IVP((sge + 3)); 779 } 780 781 /* Save the shared descriptor */ 782 flc = &priv->flc_desc[0].flc; 783 /* Configure FD as a FRAME LIST */ 784 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 785 DPAA2_SET_FD_COMPOUND_FMT(fd); 786 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 787 788 DPAA2_SEC_DP_DEBUG( 789 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 790 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 791 sym_op->auth.data.offset, 792 sym_op->auth.data.length, 793 sess->digest_length, 794 sym_op->cipher.data.offset, 795 sym_op->cipher.data.length, 796 sess->iv.length, 797 sym_op->m_src->data_off); 798 799 /* Configure Output FLE with Scatter/Gather Entry */ 800 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 801 if (auth_only_len) 802 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 803 fle->length = (sess->dir == DIR_ENC) ? 804 (sym_op->cipher.data.length + icv_len) : 805 sym_op->cipher.data.length; 806 807 DPAA2_SET_FLE_SG_EXT(fle); 808 809 /* Configure Output SGE for Encap/Decap */ 810 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 811 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 812 dst->data_off); 813 sge->length = sym_op->cipher.data.length; 814 815 if (sess->dir == DIR_ENC) { 816 sge++; 817 DPAA2_SET_FLE_ADDR(sge, 818 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 819 sge->length = sess->digest_length; 820 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 821 sess->iv.length)); 822 } 823 DPAA2_SET_FLE_FIN(sge); 824 825 sge++; 826 fle++; 827 828 /* Configure Input FLE with Scatter/Gather Entry */ 829 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 830 DPAA2_SET_FLE_SG_EXT(fle); 831 DPAA2_SET_FLE_FIN(fle); 832 fle->length = (sess->dir == DIR_ENC) ? 
833 (sym_op->auth.data.length + sess->iv.length) : 834 (sym_op->auth.data.length + sess->iv.length + 835 sess->digest_length); 836 837 /* Configure Input SGE for Encap/Decap */ 838 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 839 sge->length = sess->iv.length; 840 sge++; 841 842 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 843 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 844 sym_op->m_src->data_off); 845 sge->length = sym_op->auth.data.length; 846 if (sess->dir == DIR_DEC) { 847 sge++; 848 old_icv = (uint8_t *)(sge + 1); 849 memcpy(old_icv, sym_op->auth.digest.data, 850 sess->digest_length); 851 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 852 sge->length = sess->digest_length; 853 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 854 sess->digest_length + 855 sess->iv.length)); 856 } 857 DPAA2_SET_FLE_FIN(sge); 858 if (auth_only_len) { 859 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 860 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 861 } 862 return 0; 863 } 864 865 static inline int build_auth_sg_fd( 866 dpaa2_sec_session *sess, 867 struct rte_crypto_op *op, 868 struct qbman_fd *fd, 869 __rte_unused uint16_t bpid) 870 { 871 struct rte_crypto_sym_op *sym_op = op->sym; 872 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 873 struct sec_flow_context *flc; 874 struct ctxt_priv *priv = sess->ctxt; 875 int data_len, data_offset; 876 uint8_t *old_digest; 877 struct rte_mbuf *mbuf; 878 879 data_len = sym_op->auth.data.length; 880 data_offset = sym_op->auth.data.offset; 881 882 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 883 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 884 if ((data_len & 7) || (data_offset & 7)) { 885 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 886 return -ENOTSUP; 887 } 888 889 data_len = data_len >> 3; 890 data_offset = data_offset >> 3; 891 } 892 893 mbuf = sym_op->m_src; 894 fle = (struct qbman_fle *)rte_malloc(NULL, 895 FLE_SG_MEM_SIZE(mbuf->nb_segs), 896 RTE_CACHE_LINE_SIZE); 897 if (unlikely(!fle)) { 898 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 899 return -ENOMEM; 900 } 901 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs)); 902 /* first FLE entry used to store mbuf and session ctxt */ 903 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 904 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 905 op_fle = fle + 1; 906 ip_fle = fle + 2; 907 sge = fle + 3; 908 909 flc = &priv->flc_desc[DESC_INITFINAL].flc; 910 /* sg FD */ 911 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 912 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 913 DPAA2_SET_FD_COMPOUND_FMT(fd); 914 915 /* o/p fle */ 916 DPAA2_SET_FLE_ADDR(op_fle, 917 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 918 op_fle->length = sess->digest_length; 919 920 /* i/p fle */ 921 DPAA2_SET_FLE_SG_EXT(ip_fle); 922 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 923 ip_fle->length = data_len; 924 925 if (sess->iv.length) { 926 uint8_t *iv_ptr; 927 928 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 929 sess->iv.offset); 930 931 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 932 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 933 sge->length = 12; 934 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 935 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 936 sge->length = 8; 937 } else { 938 sge->length = sess->iv.length; 939 } 940 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 941 ip_fle->length += sge->length; 942 sge++; 943 } 944 /* i/p 1st seg */ 945 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 946 DPAA2_SET_FLE_OFFSET(sge, data_offset + 
mbuf->data_off); 947 948 if (data_len <= (mbuf->data_len - data_offset)) { 949 sge->length = data_len; 950 data_len = 0; 951 } else { 952 sge->length = mbuf->data_len - data_offset; 953 954 /* remaining i/p segs */ 955 while ((data_len = data_len - sge->length) && 956 (mbuf = mbuf->next)) { 957 sge++; 958 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 959 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 960 if (data_len > mbuf->data_len) 961 sge->length = mbuf->data_len; 962 else 963 sge->length = data_len; 964 } 965 } 966 967 if (sess->dir == DIR_DEC) { 968 /* Digest verification case */ 969 sge++; 970 old_digest = (uint8_t *)(sge + 1); 971 rte_memcpy(old_digest, sym_op->auth.digest.data, 972 sess->digest_length); 973 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 974 sge->length = sess->digest_length; 975 ip_fle->length += sess->digest_length; 976 } 977 DPAA2_SET_FLE_FIN(sge); 978 DPAA2_SET_FLE_FIN(ip_fle); 979 DPAA2_SET_FD_LEN(fd, ip_fle->length); 980 981 return 0; 982 } 983 984 static inline int 985 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 986 struct qbman_fd *fd, uint16_t bpid) 987 { 988 struct rte_crypto_sym_op *sym_op = op->sym; 989 struct qbman_fle *fle, *sge; 990 struct sec_flow_context *flc; 991 struct ctxt_priv *priv = sess->ctxt; 992 int data_len, data_offset; 993 uint8_t *old_digest; 994 int retval; 995 996 data_len = sym_op->auth.data.length; 997 data_offset = sym_op->auth.data.offset; 998 999 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 || 1000 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1001 if ((data_len & 7) || (data_offset & 7)) { 1002 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes"); 1003 return -ENOTSUP; 1004 } 1005 1006 data_len = data_len >> 3; 1007 data_offset = data_offset >> 3; 1008 } 1009 1010 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1011 if (retval) { 1012 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 1013 return -ENOMEM; 1014 } 1015 memset(fle, 0, FLE_POOL_BUF_SIZE); 1016 /* TODO we are using the first FLE entry to store Mbuf. 1017 * Currently we donot know which FLE has the mbuf stored. 1018 * So while retreiving we can go back 1 FLE from the FD -ADDR 1019 * to get the MBUF Addr from the previous FLE. 
1020 * We can have a better approach to use the inline Mbuf 1021 */ 1022 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1023 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1024 fle = fle + 1; 1025 sge = fle + 2; 1026 1027 if (likely(bpid < MAX_BPID)) { 1028 DPAA2_SET_FD_BPID(fd, bpid); 1029 DPAA2_SET_FLE_BPID(fle, bpid); 1030 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1031 DPAA2_SET_FLE_BPID(sge, bpid); 1032 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1033 } else { 1034 DPAA2_SET_FD_IVP(fd); 1035 DPAA2_SET_FLE_IVP(fle); 1036 DPAA2_SET_FLE_IVP((fle + 1)); 1037 DPAA2_SET_FLE_IVP(sge); 1038 DPAA2_SET_FLE_IVP((sge + 1)); 1039 } 1040 1041 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1042 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1043 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1044 DPAA2_SET_FD_COMPOUND_FMT(fd); 1045 1046 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 1047 fle->length = sess->digest_length; 1048 fle++; 1049 1050 /* Setting input FLE */ 1051 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1052 DPAA2_SET_FLE_SG_EXT(fle); 1053 fle->length = data_len; 1054 1055 if (sess->iv.length) { 1056 uint8_t *iv_ptr; 1057 1058 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1059 sess->iv.offset); 1060 1061 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { 1062 iv_ptr = conv_to_snow_f9_iv(iv_ptr); 1063 sge->length = 12; 1064 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) { 1065 iv_ptr = conv_to_zuc_eia_iv(iv_ptr); 1066 sge->length = 8; 1067 } else { 1068 sge->length = sess->iv.length; 1069 } 1070 1071 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1072 fle->length = fle->length + sge->length; 1073 sge++; 1074 } 1075 1076 /* Setting data to authenticate */ 1077 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1078 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1079 sge->length = data_len; 1080 1081 if (sess->dir == DIR_DEC) { 1082 sge++; 1083 old_digest = (uint8_t *)(sge + 1); 1084 rte_memcpy(old_digest, sym_op->auth.digest.data, 1085 sess->digest_length); 1086 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 1087 sge->length = sess->digest_length; 1088 fle->length = fle->length + sess->digest_length; 1089 } 1090 1091 DPAA2_SET_FLE_FIN(sge); 1092 DPAA2_SET_FLE_FIN(fle); 1093 DPAA2_SET_FD_LEN(fd, fle->length); 1094 1095 return 0; 1096 } 1097 1098 static int 1099 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1100 struct qbman_fd *fd, __rte_unused uint16_t bpid) 1101 { 1102 struct rte_crypto_sym_op *sym_op = op->sym; 1103 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 1104 int data_len, data_offset; 1105 struct sec_flow_context *flc; 1106 struct ctxt_priv *priv = sess->ctxt; 1107 struct rte_mbuf *mbuf; 1108 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1109 sess->iv.offset); 1110 1111 data_len = sym_op->cipher.data.length; 1112 data_offset = sym_op->cipher.data.offset; 1113 1114 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1115 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1116 if ((data_len & 7) || (data_offset & 7)) { 1117 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1118 return -ENOTSUP; 1119 } 1120 1121 data_len = data_len >> 3; 1122 data_offset = data_offset >> 3; 1123 } 1124 1125 if (sym_op->m_dst) 1126 mbuf = sym_op->m_dst; 1127 else 1128 mbuf = sym_op->m_src; 1129 1130 /* first FLE entry used to store mbuf and session ctxt */ 1131 fle = (struct qbman_fle *)rte_malloc(NULL, 1132 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs), 1133 
RTE_CACHE_LINE_SIZE); 1134 if (!fle) { 1135 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 1136 return -ENOMEM; 1137 } 1138 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs)); 1139 /* first FLE entry used to store mbuf and session ctxt */ 1140 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1141 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1142 1143 op_fle = fle + 1; 1144 ip_fle = fle + 2; 1145 sge = fle + 3; 1146 1147 flc = &priv->flc_desc[0].flc; 1148 1149 DPAA2_SEC_DP_DEBUG( 1150 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 1151 " data_off: 0x%x\n", 1152 data_offset, 1153 data_len, 1154 sess->iv.length, 1155 sym_op->m_src->data_off); 1156 1157 /* o/p fle */ 1158 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 1159 op_fle->length = data_len; 1160 DPAA2_SET_FLE_SG_EXT(op_fle); 1161 1162 /* o/p 1st seg */ 1163 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1164 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1165 sge->length = mbuf->data_len - data_offset; 1166 1167 mbuf = mbuf->next; 1168 /* o/p segs */ 1169 while (mbuf) { 1170 sge++; 1171 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1172 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1173 sge->length = mbuf->data_len; 1174 mbuf = mbuf->next; 1175 } 1176 DPAA2_SET_FLE_FIN(sge); 1177 1178 DPAA2_SEC_DP_DEBUG( 1179 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 1180 flc, fle, fle->addr_hi, fle->addr_lo, 1181 fle->length); 1182 1183 /* i/p fle */ 1184 mbuf = sym_op->m_src; 1185 sge++; 1186 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 1187 ip_fle->length = sess->iv.length + data_len; 1188 DPAA2_SET_FLE_SG_EXT(ip_fle); 1189 1190 /* i/p IV */ 1191 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1192 DPAA2_SET_FLE_OFFSET(sge, 0); 1193 sge->length = sess->iv.length; 1194 1195 sge++; 1196 1197 /* i/p 1st seg */ 1198 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1199 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off); 1200 sge->length = mbuf->data_len - data_offset; 1201 1202 mbuf = mbuf->next; 1203 /* i/p segs */ 1204 while (mbuf) { 1205 sge++; 1206 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1207 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1208 sge->length = mbuf->data_len; 1209 mbuf = mbuf->next; 1210 } 1211 DPAA2_SET_FLE_FIN(sge); 1212 DPAA2_SET_FLE_FIN(ip_fle); 1213 1214 /* sg fd */ 1215 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1216 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1217 DPAA2_SET_FD_COMPOUND_FMT(fd); 1218 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1219 1220 DPAA2_SEC_DP_DEBUG( 1221 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1222 " off =%d, len =%d\n", 1223 DPAA2_GET_FD_ADDR(fd), 1224 DPAA2_GET_FD_BPID(fd), 1225 rte_dpaa2_bpid_info[bpid].meta_data_size, 1226 DPAA2_GET_FD_OFFSET(fd), 1227 DPAA2_GET_FD_LEN(fd)); 1228 return 0; 1229 } 1230 1231 static int 1232 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1233 struct qbman_fd *fd, uint16_t bpid) 1234 { 1235 struct rte_crypto_sym_op *sym_op = op->sym; 1236 struct qbman_fle *fle, *sge; 1237 int retval, data_len, data_offset; 1238 struct sec_flow_context *flc; 1239 struct ctxt_priv *priv = sess->ctxt; 1240 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1241 sess->iv.offset); 1242 struct rte_mbuf *dst; 1243 1244 data_len = sym_op->cipher.data.length; 1245 data_offset = sym_op->cipher.data.offset; 1246 1247 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 || 1248 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) { 1249 if 
((data_len & 7) || (data_offset & 7)) { 1250 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes"); 1251 return -ENOTSUP; 1252 } 1253 1254 data_len = data_len >> 3; 1255 data_offset = data_offset >> 3; 1256 } 1257 1258 if (sym_op->m_dst) 1259 dst = sym_op->m_dst; 1260 else 1261 dst = sym_op->m_src; 1262 1263 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1264 if (retval) { 1265 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1266 return -ENOMEM; 1267 } 1268 memset(fle, 0, FLE_POOL_BUF_SIZE); 1269 /* TODO we are using the first FLE entry to store Mbuf. 1270 * Currently we donot know which FLE has the mbuf stored. 1271 * So while retreiving we can go back 1 FLE from the FD -ADDR 1272 * to get the MBUF Addr from the previous FLE. 1273 * We can have a better approach to use the inline Mbuf 1274 */ 1275 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1276 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1277 fle = fle + 1; 1278 sge = fle + 2; 1279 1280 if (likely(bpid < MAX_BPID)) { 1281 DPAA2_SET_FD_BPID(fd, bpid); 1282 DPAA2_SET_FLE_BPID(fle, bpid); 1283 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1284 DPAA2_SET_FLE_BPID(sge, bpid); 1285 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1286 } else { 1287 DPAA2_SET_FD_IVP(fd); 1288 DPAA2_SET_FLE_IVP(fle); 1289 DPAA2_SET_FLE_IVP((fle + 1)); 1290 DPAA2_SET_FLE_IVP(sge); 1291 DPAA2_SET_FLE_IVP((sge + 1)); 1292 } 1293 1294 flc = &priv->flc_desc[0].flc; 1295 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1296 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length); 1297 DPAA2_SET_FD_COMPOUND_FMT(fd); 1298 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1299 1300 DPAA2_SEC_DP_DEBUG( 1301 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1302 " data_off: 0x%x\n", 1303 data_offset, 1304 data_len, 1305 sess->iv.length, 1306 sym_op->m_src->data_off); 1307 1308 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1309 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off); 1310 1311 fle->length = data_len + sess->iv.length; 1312 1313 DPAA2_SEC_DP_DEBUG( 1314 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1315 flc, fle, fle->addr_hi, fle->addr_lo, 1316 fle->length); 1317 1318 fle++; 1319 1320 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1321 fle->length = data_len + sess->iv.length; 1322 1323 DPAA2_SET_FLE_SG_EXT(fle); 1324 1325 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1326 sge->length = sess->iv.length; 1327 1328 sge++; 1329 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1330 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off); 1331 1332 sge->length = data_len; 1333 DPAA2_SET_FLE_FIN(sge); 1334 DPAA2_SET_FLE_FIN(fle); 1335 1336 DPAA2_SEC_DP_DEBUG( 1337 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1338 " off =%d, len =%d\n", 1339 DPAA2_GET_FD_ADDR(fd), 1340 DPAA2_GET_FD_BPID(fd), 1341 rte_dpaa2_bpid_info[bpid].meta_data_size, 1342 DPAA2_GET_FD_OFFSET(fd), 1343 DPAA2_GET_FD_LEN(fd)); 1344 1345 return 0; 1346 } 1347 1348 static inline int 1349 build_sec_fd(struct rte_crypto_op *op, 1350 struct qbman_fd *fd, uint16_t bpid) 1351 { 1352 int ret = -1; 1353 dpaa2_sec_session *sess; 1354 1355 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1356 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1357 op->sym->session, cryptodev_driver_id); 1358 #ifdef RTE_LIBRTE_SECURITY 1359 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1360 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1361 op->sym->sec_session); 1362 #endif 1363 else 1364 return -ENOTSUP; 1365 1366 if (!sess) 1367 return -EINVAL; 1368 
1369 /* Any of the buffer is segmented*/ 1370 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) || 1371 ((op->sym->m_dst != NULL) && 1372 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) { 1373 switch (sess->ctxt_type) { 1374 case DPAA2_SEC_CIPHER: 1375 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1376 break; 1377 case DPAA2_SEC_AUTH: 1378 ret = build_auth_sg_fd(sess, op, fd, bpid); 1379 break; 1380 case DPAA2_SEC_AEAD: 1381 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1382 break; 1383 case DPAA2_SEC_CIPHER_HASH: 1384 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1385 break; 1386 #ifdef RTE_LIBRTE_SECURITY 1387 case DPAA2_SEC_IPSEC: 1388 case DPAA2_SEC_PDCP: 1389 ret = build_proto_compound_sg_fd(sess, op, fd, bpid); 1390 break; 1391 #endif 1392 case DPAA2_SEC_HASH_CIPHER: 1393 default: 1394 DPAA2_SEC_ERR("error: Unsupported session"); 1395 } 1396 } else { 1397 switch (sess->ctxt_type) { 1398 case DPAA2_SEC_CIPHER: 1399 ret = build_cipher_fd(sess, op, fd, bpid); 1400 break; 1401 case DPAA2_SEC_AUTH: 1402 ret = build_auth_fd(sess, op, fd, bpid); 1403 break; 1404 case DPAA2_SEC_AEAD: 1405 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1406 break; 1407 case DPAA2_SEC_CIPHER_HASH: 1408 ret = build_authenc_fd(sess, op, fd, bpid); 1409 break; 1410 #ifdef RTE_LIBRTE_SECURITY 1411 case DPAA2_SEC_IPSEC: 1412 ret = build_proto_fd(sess, op, fd, bpid); 1413 break; 1414 case DPAA2_SEC_PDCP: 1415 ret = build_proto_compound_fd(sess, op, fd, bpid); 1416 break; 1417 #endif 1418 case DPAA2_SEC_HASH_CIPHER: 1419 default: 1420 DPAA2_SEC_ERR("error: Unsupported session"); 1421 ret = -ENOTSUP; 1422 } 1423 } 1424 return ret; 1425 } 1426 1427 static uint16_t 1428 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1429 uint16_t nb_ops) 1430 { 1431 /* Function to transmit the frames to given device and VQ*/ 1432 uint32_t loop; 1433 int32_t ret; 1434 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1435 uint32_t frames_to_send, retry_count; 1436 struct qbman_eq_desc eqdesc; 1437 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1438 struct qbman_swp *swp; 1439 uint16_t num_tx = 0; 1440 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1441 /*todo - need to support multiple buffer pools */ 1442 uint16_t bpid; 1443 struct rte_mempool *mb_pool; 1444 1445 if (unlikely(nb_ops == 0)) 1446 return 0; 1447 1448 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1449 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1450 return 0; 1451 } 1452 /*Prepare enqueue descriptor*/ 1453 qbman_eq_desc_clear(&eqdesc); 1454 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1455 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1456 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1457 1458 if (!DPAA2_PER_LCORE_DPIO) { 1459 ret = dpaa2_affine_qbman_swp(); 1460 if (ret) { 1461 DPAA2_SEC_ERR( 1462 "Failed to allocate IO portal, tid: %d\n", 1463 rte_gettid()); 1464 return 0; 1465 } 1466 } 1467 swp = DPAA2_PER_LCORE_PORTAL; 1468 1469 while (nb_ops) { 1470 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 
1471 dpaa2_eqcr_size : nb_ops; 1472 1473 for (loop = 0; loop < frames_to_send; loop++) { 1474 if ((*ops)->sym->m_src->seqn) { 1475 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1; 1476 1477 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1478 DPAA2_PER_LCORE_DQRR_SIZE--; 1479 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1480 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN; 1481 } 1482 1483 /*Clear the unused FD fields before sending*/ 1484 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1485 mb_pool = (*ops)->sym->m_src->pool; 1486 bpid = mempool_to_bpid(mb_pool); 1487 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1488 if (ret) { 1489 DPAA2_SEC_ERR("error: Improper packet contents" 1490 " for crypto operation"); 1491 goto skip_tx; 1492 } 1493 ops++; 1494 } 1495 1496 loop = 0; 1497 retry_count = 0; 1498 while (loop < frames_to_send) { 1499 ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 1500 &fd_arr[loop], 1501 &flags[loop], 1502 frames_to_send - loop); 1503 if (unlikely(ret < 0)) { 1504 retry_count++; 1505 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1506 num_tx += loop; 1507 nb_ops -= loop; 1508 goto skip_tx; 1509 } 1510 } else { 1511 loop += ret; 1512 retry_count = 0; 1513 } 1514 } 1515 1516 num_tx += loop; 1517 nb_ops -= loop; 1518 } 1519 skip_tx: 1520 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1521 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1522 return num_tx; 1523 } 1524 1525 #ifdef RTE_LIBRTE_SECURITY 1526 static inline struct rte_crypto_op * 1527 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1528 { 1529 struct rte_crypto_op *op; 1530 uint16_t len = DPAA2_GET_FD_LEN(fd); 1531 int16_t diff = 0; 1532 dpaa2_sec_session *sess_priv __rte_unused; 1533 1534 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1535 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1536 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1537 1538 diff = len - mbuf->pkt_len; 1539 mbuf->pkt_len += diff; 1540 mbuf->data_len += diff; 1541 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1542 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1543 op->sym->aead.digest.phys_addr = 0L; 1544 1545 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1546 op->sym->sec_session); 1547 if (sess_priv->dir == DIR_ENC) 1548 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1549 else 1550 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1551 1552 return op; 1553 } 1554 #endif 1555 1556 static inline struct rte_crypto_op * 1557 sec_fd_to_mbuf(const struct qbman_fd *fd) 1558 { 1559 struct qbman_fle *fle; 1560 struct rte_crypto_op *op; 1561 struct ctxt_priv *priv; 1562 struct rte_mbuf *dst, *src; 1563 1564 #ifdef RTE_LIBRTE_SECURITY 1565 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1566 return sec_simple_fd_to_mbuf(fd); 1567 #endif 1568 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1569 1570 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1571 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1572 1573 /* we are using the first FLE entry to store Mbuf. 1574 * Currently we donot know which FLE has the mbuf stored. 1575 * So while retreiving we can go back 1 FLE from the FD -ADDR 1576 * to get the MBUF Addr from the previous FLE. 1577 * We can have a better approach to use the inline Mbuf 1578 */ 1579 1580 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1581 /* TODO complete it. 
*/ 1582 DPAA2_SEC_ERR("error: non inline buffer"); 1583 return NULL; 1584 } 1585 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1586 1587 /* Prefeth op */ 1588 src = op->sym->m_src; 1589 rte_prefetch0(src); 1590 1591 if (op->sym->m_dst) { 1592 dst = op->sym->m_dst; 1593 rte_prefetch0(dst); 1594 } else 1595 dst = src; 1596 1597 #ifdef RTE_LIBRTE_SECURITY 1598 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1599 uint16_t len = DPAA2_GET_FD_LEN(fd); 1600 dst->pkt_len = len; 1601 while (dst->next != NULL) { 1602 len -= dst->data_len; 1603 dst = dst->next; 1604 } 1605 dst->data_len = len; 1606 } 1607 #endif 1608 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1609 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1610 (void *)dst, 1611 dst->buf_addr, 1612 DPAA2_GET_FD_ADDR(fd), 1613 DPAA2_GET_FD_BPID(fd), 1614 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1615 DPAA2_GET_FD_OFFSET(fd), 1616 DPAA2_GET_FD_LEN(fd)); 1617 1618 /* free the fle memory */ 1619 if (likely(rte_pktmbuf_is_contiguous(src))) { 1620 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1621 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1622 } else 1623 rte_free((void *)(fle-1)); 1624 1625 return op; 1626 } 1627 1628 static uint16_t 1629 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1630 uint16_t nb_ops) 1631 { 1632 /* Function is responsible to receive frames for a given device and VQ*/ 1633 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1634 struct qbman_result *dq_storage; 1635 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1636 int ret, num_rx = 0; 1637 uint8_t is_last = 0, status; 1638 struct qbman_swp *swp; 1639 const struct qbman_fd *fd; 1640 struct qbman_pull_desc pulldesc; 1641 1642 if (!DPAA2_PER_LCORE_DPIO) { 1643 ret = dpaa2_affine_qbman_swp(); 1644 if (ret) { 1645 DPAA2_SEC_ERR( 1646 "Failed to allocate IO portal, tid: %d\n", 1647 rte_gettid()); 1648 return 0; 1649 } 1650 } 1651 swp = DPAA2_PER_LCORE_PORTAL; 1652 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1653 1654 qbman_pull_desc_clear(&pulldesc); 1655 qbman_pull_desc_set_numframes(&pulldesc, 1656 (nb_ops > dpaa2_dqrr_size) ? 1657 dpaa2_dqrr_size : nb_ops); 1658 qbman_pull_desc_set_fq(&pulldesc, fqid); 1659 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1660 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1661 1); 1662 1663 /*Issue a volatile dequeue command. */ 1664 while (1) { 1665 if (qbman_swp_pull(swp, &pulldesc)) { 1666 DPAA2_SEC_WARN( 1667 "SEC VDQ command is not issued : QBMAN busy"); 1668 /* Portal was busy, try again */ 1669 continue; 1670 } 1671 break; 1672 }; 1673 1674 /* Receive the packets till Last Dequeue entry is found with 1675 * respect to the above issues PULL command. 1676 */ 1677 while (!is_last) { 1678 /* Check if the previous issued command is completed. 1679 * Also seems like the SWP is shared between the Ethernet Driver 1680 * and the SEC driver. 1681 */ 1682 while (!qbman_check_command_complete(dq_storage)) 1683 ; 1684 1685 /* Loop until the dq_storage is updated with 1686 * new token by QBMAN 1687 */ 1688 while (!qbman_check_new_result(dq_storage)) 1689 ; 1690 /* Check whether Last Pull command is Expired and 1691 * setting Condition for Loop termination 1692 */ 1693 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1694 is_last = 1; 1695 /* Check for valid frame. 
*/ 1696 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1697 if (unlikely( 1698 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1699 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1700 continue; 1701 } 1702 } 1703 1704 fd = qbman_result_DQ_fd(dq_storage); 1705 ops[num_rx] = sec_fd_to_mbuf(fd); 1706 1707 if (unlikely(fd->simple.frc)) { 1708 /* TODO Parse SEC errors */ 1709 DPAA2_SEC_ERR("SEC returned Error - %x", 1710 fd->simple.frc); 1711 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1712 } else { 1713 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1714 } 1715 1716 num_rx++; 1717 dq_storage++; 1718 } /* End of Packet Rx loop */ 1719 1720 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1721 1722 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1723 /*Return the total number of packets received to DPAA2 app*/ 1724 return num_rx; 1725 } 1726 1727 /** Release queue pair */ 1728 static int 1729 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1730 { 1731 struct dpaa2_sec_qp *qp = 1732 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1733 1734 PMD_INIT_FUNC_TRACE(); 1735 1736 if (qp->rx_vq.q_storage) { 1737 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1738 rte_free(qp->rx_vq.q_storage); 1739 } 1740 rte_free(qp); 1741 1742 dev->data->queue_pairs[queue_pair_id] = NULL; 1743 1744 return 0; 1745 } 1746 1747 /** Setup a queue pair */ 1748 static int 1749 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1750 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1751 __rte_unused int socket_id) 1752 { 1753 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1754 struct dpaa2_sec_qp *qp; 1755 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1756 struct dpseci_rx_queue_cfg cfg; 1757 int32_t retcode; 1758 1759 PMD_INIT_FUNC_TRACE(); 1760 1761 /* If qp is already in use free ring memory and qp metadata. 
*/ 1762 if (dev->data->queue_pairs[qp_id] != NULL) { 1763 DPAA2_SEC_INFO("QP already setup"); 1764 return 0; 1765 } 1766 1767 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1768 dev, qp_id, qp_conf); 1769 1770 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1771 1772 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1773 RTE_CACHE_LINE_SIZE); 1774 if (!qp) { 1775 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1776 return -ENOMEM; 1777 } 1778 1779 qp->rx_vq.crypto_data = dev->data; 1780 qp->tx_vq.crypto_data = dev->data; 1781 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1782 sizeof(struct queue_storage_info_t), 1783 RTE_CACHE_LINE_SIZE); 1784 if (!qp->rx_vq.q_storage) { 1785 DPAA2_SEC_ERR("malloc failed for q_storage"); 1786 return -ENOMEM; 1787 } 1788 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1789 1790 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1791 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1792 return -ENOMEM; 1793 } 1794 1795 dev->data->queue_pairs[qp_id] = qp; 1796 1797 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1798 cfg.user_ctx = (size_t)(&qp->rx_vq); 1799 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1800 qp_id, &cfg); 1801 return retcode; 1802 } 1803 1804 /** Returns the size of the aesni gcm session structure */ 1805 static unsigned int 1806 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1807 { 1808 PMD_INIT_FUNC_TRACE(); 1809 1810 return sizeof(dpaa2_sec_session); 1811 } 1812 1813 static int 1814 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1815 struct rte_crypto_sym_xform *xform, 1816 dpaa2_sec_session *session) 1817 { 1818 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1819 struct alginfo cipherdata; 1820 int bufsize, ret = 0; 1821 struct ctxt_priv *priv; 1822 struct sec_flow_context *flc; 1823 1824 PMD_INIT_FUNC_TRACE(); 1825 1826 /* For SEC CIPHER only one descriptor is required. */ 1827 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1828 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1829 RTE_CACHE_LINE_SIZE); 1830 if (priv == NULL) { 1831 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1832 return -ENOMEM; 1833 } 1834 1835 priv->fle_pool = dev_priv->fle_pool; 1836 1837 flc = &priv->flc_desc[0].flc; 1838 1839 session->ctxt_type = DPAA2_SEC_CIPHER; 1840 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1841 RTE_CACHE_LINE_SIZE); 1842 if (session->cipher_key.data == NULL) { 1843 DPAA2_SEC_ERR("No Memory for cipher key"); 1844 rte_free(priv); 1845 return -ENOMEM; 1846 } 1847 session->cipher_key.length = xform->cipher.key.length; 1848 1849 memcpy(session->cipher_key.data, xform->cipher.key.data, 1850 xform->cipher.key.length); 1851 cipherdata.key = (size_t)session->cipher_key.data; 1852 cipherdata.keylen = session->cipher_key.length; 1853 cipherdata.key_enc_flags = 0; 1854 cipherdata.key_type = RTA_DATA_IMM; 1855 1856 /* Set IV parameters */ 1857 session->iv.offset = xform->cipher.iv.offset; 1858 session->iv.length = xform->cipher.iv.length; 1859 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
1860 DIR_ENC : DIR_DEC; 1861 1862 switch (xform->cipher.algo) { 1863 case RTE_CRYPTO_CIPHER_AES_CBC: 1864 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1865 cipherdata.algmode = OP_ALG_AAI_CBC; 1866 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1867 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1868 SHR_NEVER, &cipherdata, 1869 session->iv.length, 1870 session->dir); 1871 break; 1872 case RTE_CRYPTO_CIPHER_3DES_CBC: 1873 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1874 cipherdata.algmode = OP_ALG_AAI_CBC; 1875 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1876 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1877 SHR_NEVER, &cipherdata, 1878 session->iv.length, 1879 session->dir); 1880 break; 1881 case RTE_CRYPTO_CIPHER_AES_CTR: 1882 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1883 cipherdata.algmode = OP_ALG_AAI_CTR; 1884 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1885 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1886 SHR_NEVER, &cipherdata, 1887 session->iv.length, 1888 session->dir); 1889 break; 1890 case RTE_CRYPTO_CIPHER_3DES_CTR: 1891 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1892 cipherdata.algmode = OP_ALG_AAI_CTR; 1893 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR; 1894 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1895 SHR_NEVER, &cipherdata, 1896 session->iv.length, 1897 session->dir); 1898 break; 1899 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1900 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1901 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1902 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1903 &cipherdata, 1904 session->dir); 1905 break; 1906 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1907 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1908 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1909 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1910 &cipherdata, 1911 session->dir); 1912 break; 1913 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1914 case RTE_CRYPTO_CIPHER_AES_F8: 1915 case RTE_CRYPTO_CIPHER_AES_ECB: 1916 case RTE_CRYPTO_CIPHER_3DES_ECB: 1917 case RTE_CRYPTO_CIPHER_AES_XTS: 1918 case RTE_CRYPTO_CIPHER_ARC4: 1919 case RTE_CRYPTO_CIPHER_NULL: 1920 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1921 xform->cipher.algo); 1922 ret = -ENOTSUP; 1923 goto error_out; 1924 default: 1925 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1926 xform->cipher.algo); 1927 ret = -ENOTSUP; 1928 goto error_out; 1929 } 1930 1931 if (bufsize < 0) { 1932 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1933 ret = -EINVAL; 1934 goto error_out; 1935 } 1936 1937 flc->word1_sdl = (uint8_t)bufsize; 1938 session->ctxt = priv; 1939 1940 #ifdef CAAM_DESC_DEBUG 1941 int i; 1942 for (i = 0; i < bufsize; i++) 1943 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1944 #endif 1945 return ret; 1946 1947 error_out: 1948 rte_free(session->cipher_key.data); 1949 rte_free(priv); 1950 return ret; 1951 } 1952 1953 static int 1954 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1955 struct rte_crypto_sym_xform *xform, 1956 dpaa2_sec_session *session) 1957 { 1958 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1959 struct alginfo authdata; 1960 int bufsize, ret = 0; 1961 struct ctxt_priv *priv; 1962 struct sec_flow_context *flc; 1963 1964 PMD_INIT_FUNC_TRACE(); 1965 1966 /* For SEC AUTH three descriptors are required for various stages */ 1967 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1968 sizeof(struct ctxt_priv) + 3 * 1969 sizeof(struct sec_flc_desc), 1970 RTE_CACHE_LINE_SIZE); 1971 if (priv == NULL) { 
1972 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1973 return -ENOMEM; 1974 } 1975 1976 priv->fle_pool = dev_priv->fle_pool; 1977 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1978 1979 session->ctxt_type = DPAA2_SEC_AUTH; 1980 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1981 RTE_CACHE_LINE_SIZE); 1982 if (session->auth_key.data == NULL) { 1983 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1984 rte_free(priv); 1985 return -ENOMEM; 1986 } 1987 session->auth_key.length = xform->auth.key.length; 1988 1989 memcpy(session->auth_key.data, xform->auth.key.data, 1990 xform->auth.key.length); 1991 authdata.key = (size_t)session->auth_key.data; 1992 authdata.keylen = session->auth_key.length; 1993 authdata.key_enc_flags = 0; 1994 authdata.key_type = RTA_DATA_IMM; 1995 1996 session->digest_length = xform->auth.digest_length; 1997 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1998 DIR_ENC : DIR_DEC; 1999 2000 switch (xform->auth.algo) { 2001 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2002 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2003 authdata.algmode = OP_ALG_AAI_HMAC; 2004 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2005 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2006 1, 0, SHR_NEVER, &authdata, 2007 !session->dir, 2008 session->digest_length); 2009 break; 2010 case RTE_CRYPTO_AUTH_MD5_HMAC: 2011 authdata.algtype = OP_ALG_ALGSEL_MD5; 2012 authdata.algmode = OP_ALG_AAI_HMAC; 2013 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2014 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2015 1, 0, SHR_NEVER, &authdata, 2016 !session->dir, 2017 session->digest_length); 2018 break; 2019 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2020 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2021 authdata.algmode = OP_ALG_AAI_HMAC; 2022 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2023 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2024 1, 0, SHR_NEVER, &authdata, 2025 !session->dir, 2026 session->digest_length); 2027 break; 2028 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2029 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2030 authdata.algmode = OP_ALG_AAI_HMAC; 2031 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2032 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2033 1, 0, SHR_NEVER, &authdata, 2034 !session->dir, 2035 session->digest_length); 2036 break; 2037 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2038 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2039 authdata.algmode = OP_ALG_AAI_HMAC; 2040 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2041 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2042 1, 0, SHR_NEVER, &authdata, 2043 !session->dir, 2044 session->digest_length); 2045 break; 2046 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2047 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2048 authdata.algmode = OP_ALG_AAI_HMAC; 2049 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2050 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2051 1, 0, SHR_NEVER, &authdata, 2052 !session->dir, 2053 session->digest_length); 2054 break; 2055 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2056 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2057 authdata.algmode = OP_ALG_AAI_F9; 2058 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2059 session->iv.offset = xform->auth.iv.offset; 2060 session->iv.length = xform->auth.iv.length; 2061 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2062 1, 0, &authdata, 2063 !session->dir, 2064 session->digest_length); 2065 break; 2066 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2067 authdata.algtype = OP_ALG_ALGSEL_ZUCA; 2068 
authdata.algmode = OP_ALG_AAI_F9; 2069 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3; 2070 session->iv.offset = xform->auth.iv.offset; 2071 session->iv.length = xform->auth.iv.length; 2072 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc, 2073 1, 0, &authdata, 2074 !session->dir, 2075 session->digest_length); 2076 break; 2077 case RTE_CRYPTO_AUTH_KASUMI_F9: 2078 case RTE_CRYPTO_AUTH_NULL: 2079 case RTE_CRYPTO_AUTH_SHA1: 2080 case RTE_CRYPTO_AUTH_SHA256: 2081 case RTE_CRYPTO_AUTH_SHA512: 2082 case RTE_CRYPTO_AUTH_SHA224: 2083 case RTE_CRYPTO_AUTH_SHA384: 2084 case RTE_CRYPTO_AUTH_MD5: 2085 case RTE_CRYPTO_AUTH_AES_GMAC: 2086 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2087 case RTE_CRYPTO_AUTH_AES_CMAC: 2088 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2089 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2090 xform->auth.algo); 2091 ret = -ENOTSUP; 2092 goto error_out; 2093 default: 2094 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2095 xform->auth.algo); 2096 ret = -ENOTSUP; 2097 goto error_out; 2098 } 2099 2100 if (bufsize < 0) { 2101 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2102 ret = -EINVAL; 2103 goto error_out; 2104 } 2105 2106 flc->word1_sdl = (uint8_t)bufsize; 2107 session->ctxt = priv; 2108 #ifdef CAAM_DESC_DEBUG 2109 int i; 2110 for (i = 0; i < bufsize; i++) 2111 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2112 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 2113 #endif 2114 2115 return ret; 2116 2117 error_out: 2118 rte_free(session->auth_key.data); 2119 rte_free(priv); 2120 return ret; 2121 } 2122 2123 static int 2124 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 2125 struct rte_crypto_sym_xform *xform, 2126 dpaa2_sec_session *session) 2127 { 2128 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2129 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2130 struct alginfo aeaddata; 2131 int bufsize; 2132 struct ctxt_priv *priv; 2133 struct sec_flow_context *flc; 2134 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2135 int err, ret = 0; 2136 2137 PMD_INIT_FUNC_TRACE(); 2138 2139 /* Set IV parameters */ 2140 session->iv.offset = aead_xform->iv.offset; 2141 session->iv.length = aead_xform->iv.length; 2142 session->ctxt_type = DPAA2_SEC_AEAD; 2143 2144 /* For SEC AEAD only one descriptor is required */ 2145 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2146 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2147 RTE_CACHE_LINE_SIZE); 2148 if (priv == NULL) { 2149 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2150 return -ENOMEM; 2151 } 2152 2153 priv->fle_pool = dev_priv->fle_pool; 2154 flc = &priv->flc_desc[0].flc; 2155 2156 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2157 RTE_CACHE_LINE_SIZE); 2158 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2159 DPAA2_SEC_ERR("No Memory for aead key"); 2160 rte_free(priv); 2161 return -ENOMEM; 2162 } 2163 memcpy(session->aead_key.data, aead_xform->key.data, 2164 aead_xform->key.length); 2165 2166 session->digest_length = aead_xform->digest_length; 2167 session->aead_key.length = aead_xform->key.length; 2168 ctxt->auth_only_len = aead_xform->aad_length; 2169 2170 aeaddata.key = (size_t)session->aead_key.data; 2171 aeaddata.keylen = session->aead_key.length; 2172 aeaddata.key_enc_flags = 0; 2173 aeaddata.key_type = RTA_DATA_IMM; 2174 2175 switch (aead_xform->algo) { 2176 case RTE_CRYPTO_AEAD_AES_GCM: 2177 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2178 aeaddata.algmode = OP_ALG_AAI_GCM; 2179 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2180 break; 2181 case
RTE_CRYPTO_AEAD_AES_CCM: 2182 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 2183 aead_xform->algo); 2184 ret = -ENOTSUP; 2185 goto error_out; 2186 default: 2187 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2188 aead_xform->algo); 2189 ret = -ENOTSUP; 2190 goto error_out; 2191 } 2192 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2193 DIR_ENC : DIR_DEC; 2194 2195 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2196 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2197 DESC_JOB_IO_LEN, 2198 (unsigned int *)priv->flc_desc[0].desc, 2199 &priv->flc_desc[0].desc[1], 1); 2200 2201 if (err < 0) { 2202 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2203 ret = -EINVAL; 2204 goto error_out; 2205 } 2206 if (priv->flc_desc[0].desc[1] & 1) { 2207 aeaddata.key_type = RTA_DATA_IMM; 2208 } else { 2209 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2210 aeaddata.key_type = RTA_DATA_PTR; 2211 } 2212 priv->flc_desc[0].desc[0] = 0; 2213 priv->flc_desc[0].desc[1] = 0; 2214 2215 if (session->dir == DIR_ENC) 2216 bufsize = cnstr_shdsc_gcm_encap( 2217 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2218 &aeaddata, session->iv.length, 2219 session->digest_length); 2220 else 2221 bufsize = cnstr_shdsc_gcm_decap( 2222 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2223 &aeaddata, session->iv.length, 2224 session->digest_length); 2225 if (bufsize < 0) { 2226 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2227 ret = -EINVAL; 2228 goto error_out; 2229 } 2230 2231 flc->word1_sdl = (uint8_t)bufsize; 2232 session->ctxt = priv; 2233 #ifdef CAAM_DESC_DEBUG 2234 int i; 2235 for (i = 0; i < bufsize; i++) 2236 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2237 i, priv->flc_desc[0].desc[i]); 2238 #endif 2239 return ret; 2240 2241 error_out: 2242 rte_free(session->aead_key.data); 2243 rte_free(priv); 2244 return ret; 2245 } 2246 2247 2248 static int 2249 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2250 struct rte_crypto_sym_xform *xform, 2251 dpaa2_sec_session *session) 2252 { 2253 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2254 struct alginfo authdata, cipherdata; 2255 int bufsize; 2256 struct ctxt_priv *priv; 2257 struct sec_flow_context *flc; 2258 struct rte_crypto_cipher_xform *cipher_xform; 2259 struct rte_crypto_auth_xform *auth_xform; 2260 int err, ret = 0; 2261 2262 PMD_INIT_FUNC_TRACE(); 2263 2264 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2265 cipher_xform = &xform->cipher; 2266 auth_xform = &xform->next->auth; 2267 session->ctxt_type = 2268 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2269 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2270 } else { 2271 cipher_xform = &xform->next->cipher; 2272 auth_xform = &xform->auth; 2273 session->ctxt_type = 2274 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2275 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2276 } 2277 2278 /* Set IV parameters */ 2279 session->iv.offset = cipher_xform->iv.offset; 2280 session->iv.length = cipher_xform->iv.length; 2281 2282 /* For SEC AEAD only one descriptor is required */ 2283 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2284 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2285 RTE_CACHE_LINE_SIZE); 2286 if (priv == NULL) { 2287 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2288 return -ENOMEM; 2289 } 2290 2291 priv->fle_pool = dev_priv->fle_pool; 2292 flc = &priv->flc_desc[0].flc; 2293 2294 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2295 RTE_CACHE_LINE_SIZE); 2296 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2297 DPAA2_SEC_ERR("No Memory for cipher key"); 2298 rte_free(priv); 2299 return -ENOMEM; 2300 } 2301 session->cipher_key.length = cipher_xform->key.length; 2302 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2303 RTE_CACHE_LINE_SIZE); 2304 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2305 DPAA2_SEC_ERR("No Memory for auth key"); 2306 rte_free(session->cipher_key.data); 2307 rte_free(priv); 2308 return -ENOMEM; 2309 } 2310 session->auth_key.length = auth_xform->key.length; 2311 memcpy(session->cipher_key.data, cipher_xform->key.data, 2312 cipher_xform->key.length); 2313 memcpy(session->auth_key.data, auth_xform->key.data, 2314 auth_xform->key.length); 2315 2316 authdata.key = (size_t)session->auth_key.data; 2317 authdata.keylen = session->auth_key.length; 2318 authdata.key_enc_flags = 0; 2319 authdata.key_type = RTA_DATA_IMM; 2320 2321 session->digest_length = auth_xform->digest_length; 2322 2323 switch (auth_xform->algo) { 2324 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2325 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2326 authdata.algmode = OP_ALG_AAI_HMAC; 2327 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2328 break; 2329 case RTE_CRYPTO_AUTH_MD5_HMAC: 2330 authdata.algtype = OP_ALG_ALGSEL_MD5; 2331 authdata.algmode = OP_ALG_AAI_HMAC; 2332 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2333 break; 2334 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2335 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2336 authdata.algmode = OP_ALG_AAI_HMAC; 2337 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2338 break; 2339 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2340 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2341 authdata.algmode = OP_ALG_AAI_HMAC; 2342 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2343 break; 2344 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2345 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2346 authdata.algmode = OP_ALG_AAI_HMAC; 2347 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2348 break; 2349 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2350 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2351 authdata.algmode = OP_ALG_AAI_HMAC; 2352 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2353 break; 2354 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2355 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2356 case RTE_CRYPTO_AUTH_NULL: 2357 case RTE_CRYPTO_AUTH_SHA1: 2358 case RTE_CRYPTO_AUTH_SHA256: 2359 case RTE_CRYPTO_AUTH_SHA512: 2360 case RTE_CRYPTO_AUTH_SHA224: 2361 case RTE_CRYPTO_AUTH_SHA384: 2362 case RTE_CRYPTO_AUTH_MD5: 2363 case RTE_CRYPTO_AUTH_AES_GMAC: 2364 case RTE_CRYPTO_AUTH_KASUMI_F9: 2365 case RTE_CRYPTO_AUTH_AES_CMAC: 2366 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2367 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2368 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2369 auth_xform->algo); 2370 ret = -ENOTSUP; 2371 goto error_out; 2372 default: 2373 DPAA2_SEC_ERR("Crypto: Undefined Auth 
specified %u", 2374 auth_xform->algo); 2375 ret = -ENOTSUP; 2376 goto error_out; 2377 } 2378 cipherdata.key = (size_t)session->cipher_key.data; 2379 cipherdata.keylen = session->cipher_key.length; 2380 cipherdata.key_enc_flags = 0; 2381 cipherdata.key_type = RTA_DATA_IMM; 2382 2383 switch (cipher_xform->algo) { 2384 case RTE_CRYPTO_CIPHER_AES_CBC: 2385 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2386 cipherdata.algmode = OP_ALG_AAI_CBC; 2387 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2388 break; 2389 case RTE_CRYPTO_CIPHER_3DES_CBC: 2390 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2391 cipherdata.algmode = OP_ALG_AAI_CBC; 2392 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2393 break; 2394 case RTE_CRYPTO_CIPHER_AES_CTR: 2395 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2396 cipherdata.algmode = OP_ALG_AAI_CTR; 2397 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2398 break; 2399 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2400 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2401 case RTE_CRYPTO_CIPHER_NULL: 2402 case RTE_CRYPTO_CIPHER_3DES_ECB: 2403 case RTE_CRYPTO_CIPHER_AES_ECB: 2404 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2405 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2406 cipher_xform->algo); 2407 ret = -ENOTSUP; 2408 goto error_out; 2409 default: 2410 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2411 cipher_xform->algo); 2412 ret = -ENOTSUP; 2413 goto error_out; 2414 } 2415 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2416 DIR_ENC : DIR_DEC; 2417 2418 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2419 priv->flc_desc[0].desc[1] = authdata.keylen; 2420 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2421 DESC_JOB_IO_LEN, 2422 (unsigned int *)priv->flc_desc[0].desc, 2423 &priv->flc_desc[0].desc[2], 2); 2424 2425 if (err < 0) { 2426 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2427 ret = -EINVAL; 2428 goto error_out; 2429 } 2430 if (priv->flc_desc[0].desc[2] & 1) { 2431 cipherdata.key_type = RTA_DATA_IMM; 2432 } else { 2433 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2434 cipherdata.key_type = RTA_DATA_PTR; 2435 } 2436 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2437 authdata.key_type = RTA_DATA_IMM; 2438 } else { 2439 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2440 authdata.key_type = RTA_DATA_PTR; 2441 } 2442 priv->flc_desc[0].desc[0] = 0; 2443 priv->flc_desc[0].desc[1] = 0; 2444 priv->flc_desc[0].desc[2] = 0; 2445 2446 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2447 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2448 0, SHR_SERIAL, 2449 &cipherdata, &authdata, 2450 session->iv.length, 2451 session->digest_length, 2452 session->dir); 2453 if (bufsize < 0) { 2454 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2455 ret = -EINVAL; 2456 goto error_out; 2457 } 2458 } else { 2459 DPAA2_SEC_ERR("Hash before cipher not supported"); 2460 ret = -ENOTSUP; 2461 goto error_out; 2462 } 2463 2464 flc->word1_sdl = (uint8_t)bufsize; 2465 session->ctxt = priv; 2466 #ifdef CAAM_DESC_DEBUG 2467 int i; 2468 for (i = 0; i < bufsize; i++) 2469 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2470 i, priv->flc_desc[0].desc[i]); 2471 #endif 2472 2473 return ret; 2474 2475 error_out: 2476 rte_free(session->cipher_key.data); 2477 rte_free(session->auth_key.data); 2478 rte_free(priv); 2479 return ret; 2480 } 2481 2482 static int 2483 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2484 struct rte_crypto_sym_xform *xform, void *sess) 2485 { 2486 dpaa2_sec_session *session = sess; 2487 int ret; 2488 2489 PMD_INIT_FUNC_TRACE(); 2490 2491 if (unlikely(sess 
== NULL)) { 2492 DPAA2_SEC_ERR("Invalid session struct"); 2493 return -EINVAL; 2494 } 2495 2496 memset(session, 0, sizeof(dpaa2_sec_session)); 2497 /* Default IV length = 0 */ 2498 session->iv.length = 0; 2499 2500 /* Cipher Only */ 2501 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2502 ret = dpaa2_sec_cipher_init(dev, xform, session); 2503 2504 /* Authentication Only */ 2505 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2506 xform->next == NULL) { 2507 ret = dpaa2_sec_auth_init(dev, xform, session); 2508 2509 /* Cipher then Authenticate */ 2510 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2511 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2512 session->ext_params.aead_ctxt.auth_cipher_text = true; 2513 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2514 ret = dpaa2_sec_auth_init(dev, xform, session); 2515 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2516 ret = dpaa2_sec_cipher_init(dev, xform, session); 2517 else 2518 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2519 /* Authenticate then Cipher */ 2520 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2521 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2522 session->ext_params.aead_ctxt.auth_cipher_text = false; 2523 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2524 ret = dpaa2_sec_cipher_init(dev, xform, session); 2525 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2526 ret = dpaa2_sec_auth_init(dev, xform, session); 2527 else 2528 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2529 /* AEAD operation for AES-GCM kind of Algorithms */ 2530 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2531 xform->next == NULL) { 2532 ret = dpaa2_sec_aead_init(dev, xform, session); 2533 2534 } else { 2535 DPAA2_SEC_ERR("Invalid crypto type"); 2536 return -EINVAL; 2537 } 2538 2539 return ret; 2540 } 2541 2542 #ifdef RTE_LIBRTE_SECURITY 2543 static int 2544 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2545 dpaa2_sec_session *session, 2546 struct alginfo *aeaddata) 2547 { 2548 PMD_INIT_FUNC_TRACE(); 2549 2550 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2551 RTE_CACHE_LINE_SIZE); 2552 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2553 DPAA2_SEC_ERR("No Memory for aead key"); 2554 return -ENOMEM; 2555 } 2556 memcpy(session->aead_key.data, aead_xform->key.data, 2557 aead_xform->key.length); 2558 2559 session->digest_length = aead_xform->digest_length; 2560 session->aead_key.length = aead_xform->key.length; 2561 2562 aeaddata->key = (size_t)session->aead_key.data; 2563 aeaddata->keylen = session->aead_key.length; 2564 aeaddata->key_enc_flags = 0; 2565 aeaddata->key_type = RTA_DATA_IMM; 2566 2567 switch (aead_xform->algo) { 2568 case RTE_CRYPTO_AEAD_AES_GCM: 2569 switch (session->digest_length) { 2570 case 8: 2571 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2572 break; 2573 case 12: 2574 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2575 break; 2576 case 16: 2577 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2578 break; 2579 default: 2580 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2581 session->digest_length); 2582 return -EINVAL; 2583 } 2584 aeaddata->algmode = OP_ALG_AAI_GCM; 2585 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2586 break; 2587 case RTE_CRYPTO_AEAD_AES_CCM: 2588 switch (session->digest_length) { 2589 case 8: 2590 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2591 break; 2592 case 12: 2593 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2594 break; 2595 case 16: 2596 
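/* Note: the ICV (digest) length requested in the AEAD xform selects the CAAM IPsec protocol descriptor variant; 8-, 12- and 16-byte tags map to the GCM8/12/16 and CCM8/12/16 algtypes chosen in these switches, and any other length is rejected with -EINVAL. */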
aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2597 break; 2598 default: 2599 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2600 session->digest_length); 2601 return -EINVAL; 2602 } 2603 aeaddata->algmode = OP_ALG_AAI_CCM; 2604 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2605 break; 2606 default: 2607 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2608 aead_xform->algo); 2609 return -ENOTSUP; 2610 } 2611 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2612 DIR_ENC : DIR_DEC; 2613 2614 return 0; 2615 } 2616 2617 static int 2618 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2619 struct rte_crypto_auth_xform *auth_xform, 2620 dpaa2_sec_session *session, 2621 struct alginfo *cipherdata, 2622 struct alginfo *authdata) 2623 { 2624 if (cipher_xform) { 2625 session->cipher_key.data = rte_zmalloc(NULL, 2626 cipher_xform->key.length, 2627 RTE_CACHE_LINE_SIZE); 2628 if (session->cipher_key.data == NULL && 2629 cipher_xform->key.length > 0) { 2630 DPAA2_SEC_ERR("No Memory for cipher key"); 2631 return -ENOMEM; 2632 } 2633 2634 session->cipher_key.length = cipher_xform->key.length; 2635 memcpy(session->cipher_key.data, cipher_xform->key.data, 2636 cipher_xform->key.length); 2637 session->cipher_alg = cipher_xform->algo; 2638 } else { 2639 session->cipher_key.data = NULL; 2640 session->cipher_key.length = 0; 2641 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2642 } 2643 2644 if (auth_xform) { 2645 session->auth_key.data = rte_zmalloc(NULL, 2646 auth_xform->key.length, 2647 RTE_CACHE_LINE_SIZE); 2648 if (session->auth_key.data == NULL && 2649 auth_xform->key.length > 0) { 2650 DPAA2_SEC_ERR("No Memory for auth key"); 2651 return -ENOMEM; 2652 } 2653 session->auth_key.length = auth_xform->key.length; 2654 memcpy(session->auth_key.data, auth_xform->key.data, 2655 auth_xform->key.length); 2656 session->auth_alg = auth_xform->algo; 2657 session->digest_length = auth_xform->digest_length; 2658 } else { 2659 session->auth_key.data = NULL; 2660 session->auth_key.length = 0; 2661 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2662 } 2663 2664 authdata->key = (size_t)session->auth_key.data; 2665 authdata->keylen = session->auth_key.length; 2666 authdata->key_enc_flags = 0; 2667 authdata->key_type = RTA_DATA_IMM; 2668 switch (session->auth_alg) { 2669 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2670 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2671 authdata->algmode = OP_ALG_AAI_HMAC; 2672 break; 2673 case RTE_CRYPTO_AUTH_MD5_HMAC: 2674 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2675 authdata->algmode = OP_ALG_AAI_HMAC; 2676 break; 2677 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2678 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2679 authdata->algmode = OP_ALG_AAI_HMAC; 2680 if (session->digest_length != 16) 2681 DPAA2_SEC_WARN( 2682 "+++Using sha256-hmac truncated len is non-standard," 2683 "it will not work with lookaside proto"); 2684 break; 2685 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2686 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2687 authdata->algmode = OP_ALG_AAI_HMAC; 2688 break; 2689 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2690 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2691 authdata->algmode = OP_ALG_AAI_HMAC; 2692 break; 2693 case RTE_CRYPTO_AUTH_AES_CMAC: 2694 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2695 break; 2696 case RTE_CRYPTO_AUTH_NULL: 2697 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2698 break; 2699 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2700 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2701 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2702 case RTE_CRYPTO_AUTH_SHA1: 2703 
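/* The auth algorithms grouped in the surrounding case labels have no lookaside IPsec protocol descriptor in this driver; they all fall through to the unsupported-algorithm error below. */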
case RTE_CRYPTO_AUTH_SHA256: 2704 case RTE_CRYPTO_AUTH_SHA512: 2705 case RTE_CRYPTO_AUTH_SHA224: 2706 case RTE_CRYPTO_AUTH_SHA384: 2707 case RTE_CRYPTO_AUTH_MD5: 2708 case RTE_CRYPTO_AUTH_AES_GMAC: 2709 case RTE_CRYPTO_AUTH_KASUMI_F9: 2710 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2711 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2712 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2713 session->auth_alg); 2714 return -ENOTSUP; 2715 default: 2716 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2717 session->auth_alg); 2718 return -ENOTSUP; 2719 } 2720 cipherdata->key = (size_t)session->cipher_key.data; 2721 cipherdata->keylen = session->cipher_key.length; 2722 cipherdata->key_enc_flags = 0; 2723 cipherdata->key_type = RTA_DATA_IMM; 2724 2725 switch (session->cipher_alg) { 2726 case RTE_CRYPTO_CIPHER_AES_CBC: 2727 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2728 cipherdata->algmode = OP_ALG_AAI_CBC; 2729 break; 2730 case RTE_CRYPTO_CIPHER_3DES_CBC: 2731 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2732 cipherdata->algmode = OP_ALG_AAI_CBC; 2733 break; 2734 case RTE_CRYPTO_CIPHER_AES_CTR: 2735 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2736 cipherdata->algmode = OP_ALG_AAI_CTR; 2737 break; 2738 case RTE_CRYPTO_CIPHER_NULL: 2739 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2740 break; 2741 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2742 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2743 case RTE_CRYPTO_CIPHER_3DES_ECB: 2744 case RTE_CRYPTO_CIPHER_AES_ECB: 2745 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2746 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2747 session->cipher_alg); 2748 return -ENOTSUP; 2749 default: 2750 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2751 session->cipher_alg); 2752 return -ENOTSUP; 2753 } 2754 2755 return 0; 2756 } 2757 2758 #ifdef RTE_LIBRTE_SECURITY_TEST 2759 static uint8_t aes_cbc_iv[] = { 2760 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2761 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2762 #endif 2763 2764 static int 2765 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2766 struct rte_security_session_conf *conf, 2767 void *sess) 2768 { 2769 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2770 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2771 struct rte_crypto_auth_xform *auth_xform = NULL; 2772 struct rte_crypto_aead_xform *aead_xform = NULL; 2773 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2774 struct ctxt_priv *priv; 2775 struct alginfo authdata, cipherdata; 2776 int bufsize; 2777 struct sec_flow_context *flc; 2778 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2779 int ret = -1; 2780 2781 PMD_INIT_FUNC_TRACE(); 2782 2783 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2784 sizeof(struct ctxt_priv) + 2785 sizeof(struct sec_flc_desc), 2786 RTE_CACHE_LINE_SIZE); 2787 2788 if (priv == NULL) { 2789 DPAA2_SEC_ERR("No memory for priv CTXT"); 2790 return -ENOMEM; 2791 } 2792 2793 priv->fle_pool = dev_priv->fle_pool; 2794 flc = &priv->flc_desc[0].flc; 2795 2796 memset(session, 0, sizeof(dpaa2_sec_session)); 2797 2798 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2799 cipher_xform = &conf->crypto_xform->cipher; 2800 if (conf->crypto_xform->next) 2801 auth_xform = &conf->crypto_xform->next->auth; 2802 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2803 session, &cipherdata, &authdata); 2804 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2805 auth_xform = &conf->crypto_xform->auth; 2806 if (conf->crypto_xform->next) 2807 cipher_xform = &conf->crypto_xform->next->cipher; 2808 ret = 
dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2809 session, &cipherdata, &authdata); 2810 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2811 aead_xform = &conf->crypto_xform->aead; 2812 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2813 session, &cipherdata); 2814 authdata.keylen = 0; 2815 authdata.algtype = 0; 2816 } else { 2817 DPAA2_SEC_ERR("XFORM not specified"); 2818 ret = -EINVAL; 2819 goto out; 2820 } 2821 if (ret) { 2822 DPAA2_SEC_ERR("Failed to process xform"); 2823 goto out; 2824 } 2825 2826 session->ctxt_type = DPAA2_SEC_IPSEC; 2827 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2828 uint8_t *hdr = NULL; 2829 struct ip ip4_hdr; 2830 struct rte_ipv6_hdr ip6_hdr; 2831 struct ipsec_encap_pdb encap_pdb; 2832 2833 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2834 /* For Sec Proto only one descriptor is required. */ 2835 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2836 2837 /* copy algo specific data to PDB */ 2838 switch (cipherdata.algtype) { 2839 case OP_PCL_IPSEC_AES_CTR: 2840 encap_pdb.ctr.ctr_initial = 0x00000001; 2841 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2842 break; 2843 case OP_PCL_IPSEC_AES_GCM8: 2844 case OP_PCL_IPSEC_AES_GCM12: 2845 case OP_PCL_IPSEC_AES_GCM16: 2846 memcpy(encap_pdb.gcm.salt, 2847 (uint8_t *)&(ipsec_xform->salt), 4); 2848 break; 2849 } 2850 2851 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2852 PDBOPTS_ESP_OIHI_PDB_INL | 2853 PDBOPTS_ESP_IVSRC | 2854 PDBHMO_ESP_ENCAP_DTTL | 2855 PDBHMO_ESP_SNR; 2856 if (ipsec_xform->options.esn) 2857 encap_pdb.options |= PDBOPTS_ESP_ESN; 2858 encap_pdb.spi = ipsec_xform->spi; 2859 session->dir = DIR_ENC; 2860 if (ipsec_xform->tunnel.type == 2861 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2862 encap_pdb.ip_hdr_len = sizeof(struct ip); 2863 ip4_hdr.ip_v = IPVERSION; 2864 ip4_hdr.ip_hl = 5; 2865 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2866 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2867 ip4_hdr.ip_id = 0; 2868 ip4_hdr.ip_off = 0; 2869 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2870 ip4_hdr.ip_p = IPPROTO_ESP; 2871 ip4_hdr.ip_sum = 0; 2872 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2873 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2874 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2875 &ip4_hdr, sizeof(struct ip)); 2876 hdr = (uint8_t *)&ip4_hdr; 2877 } else if (ipsec_xform->tunnel.type == 2878 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2879 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2880 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2881 ((ipsec_xform->tunnel.ipv6.dscp << 2882 RTE_IPV6_HDR_TC_SHIFT) & 2883 RTE_IPV6_HDR_TC_MASK) | 2884 ((ipsec_xform->tunnel.ipv6.flabel << 2885 RTE_IPV6_HDR_FL_SHIFT) & 2886 RTE_IPV6_HDR_FL_MASK)); 2887 /* Payload length will be updated by HW */ 2888 ip6_hdr.payload_len = 0; 2889 ip6_hdr.hop_limits = 2890 ipsec_xform->tunnel.ipv6.hlimit; 2891 ip6_hdr.proto = (ipsec_xform->proto == 2892 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2893 IPPROTO_ESP : IPPROTO_AH; 2894 memcpy(&ip6_hdr.src_addr, 2895 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2896 memcpy(&ip6_hdr.dst_addr, 2897 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2898 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2899 hdr = (uint8_t *)&ip6_hdr; 2900 } 2901 2902 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2903 1, 0, SHR_SERIAL, &encap_pdb, 2904 hdr, &cipherdata, &authdata); 2905 } else if (ipsec_xform->direction == 2906 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2907 struct ipsec_decap_pdb decap_pdb; 2908 2909 flc->dhr = SEC_FLC_DHR_INBOUND; 2910 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2911 /* copy algo specific data to PDB */ 2912 switch (cipherdata.algtype) { 2913 case OP_PCL_IPSEC_AES_CTR: 2914 decap_pdb.ctr.ctr_initial = 0x00000001; 2915 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2916 break; 2917 case OP_PCL_IPSEC_AES_GCM8: 2918 case OP_PCL_IPSEC_AES_GCM12: 2919 case OP_PCL_IPSEC_AES_GCM16: 2920 memcpy(decap_pdb.gcm.salt, 2921 (uint8_t *)&(ipsec_xform->salt), 4); 2922 break; 2923 } 2924 2925 decap_pdb.options = (ipsec_xform->tunnel.type == 2926 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 2927 sizeof(struct ip) << 16 : 2928 sizeof(struct rte_ipv6_hdr) << 16; 2929 if (ipsec_xform->options.esn) 2930 decap_pdb.options |= PDBOPTS_ESP_ESN; 2931 2932 if (ipsec_xform->replay_win_sz) { 2933 uint32_t win_sz; 2934 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2935 2936 switch (win_sz) { 2937 case 1: 2938 case 2: 2939 case 4: 2940 case 8: 2941 case 16: 2942 case 32: 2943 decap_pdb.options |= PDBOPTS_ESP_ARS32; 2944 break; 2945 case 64: 2946 decap_pdb.options |= PDBOPTS_ESP_ARS64; 2947 break; 2948 default: 2949 decap_pdb.options |= PDBOPTS_ESP_ARS128; 2950 } 2951 } 2952 session->dir = DIR_DEC; 2953 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2954 1, 0, SHR_SERIAL, 2955 &decap_pdb, &cipherdata, &authdata); 2956 } else 2957 goto out; 2958 2959 if (bufsize < 0) { 2960 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2961 goto out; 2962 } 2963 2964 flc->word1_sdl = (uint8_t)bufsize; 2965 2966 /* Enable the stashing control bit */ 2967 DPAA2_SET_FLC_RSC(flc); 2968 flc->word2_rflc_31_0 = lower_32_bits( 2969 (size_t)&(((struct dpaa2_sec_qp *) 2970 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2971 flc->word3_rflc_63_32 = upper_32_bits( 2972 (size_t)&(((struct dpaa2_sec_qp *) 2973 dev->data->queue_pairs[0])->rx_vq)); 2974 2975 /* Set EWS bit i.e. 
enable write-safe */ 2976 DPAA2_SET_FLC_EWS(flc); 2977 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2978 DPAA2_SET_FLC_REUSE_BS(flc); 2979 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2980 DPAA2_SET_FLC_REUSE_FF(flc); 2981 2982 session->ctxt = priv; 2983 2984 return 0; 2985 out: 2986 rte_free(session->auth_key.data); 2987 rte_free(session->cipher_key.data); 2988 rte_free(priv); 2989 return ret; 2990 } 2991 2992 static int 2993 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2994 struct rte_security_session_conf *conf, 2995 void *sess) 2996 { 2997 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2998 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2999 struct rte_crypto_auth_xform *auth_xform = NULL; 3000 struct rte_crypto_cipher_xform *cipher_xform; 3001 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3002 struct ctxt_priv *priv; 3003 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 3004 struct alginfo authdata, cipherdata; 3005 struct alginfo *p_authdata = NULL; 3006 int bufsize = -1; 3007 struct sec_flow_context *flc; 3008 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3009 int swap = true; 3010 #else 3011 int swap = false; 3012 #endif 3013 3014 PMD_INIT_FUNC_TRACE(); 3015 3016 memset(session, 0, sizeof(dpaa2_sec_session)); 3017 3018 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3019 sizeof(struct ctxt_priv) + 3020 sizeof(struct sec_flc_desc), 3021 RTE_CACHE_LINE_SIZE); 3022 3023 if (priv == NULL) { 3024 DPAA2_SEC_ERR("No memory for priv CTXT"); 3025 return -ENOMEM; 3026 } 3027 3028 priv->fle_pool = dev_priv->fle_pool; 3029 flc = &priv->flc_desc[0].flc; 3030 3031 /* find xfrm types */ 3032 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 3033 cipher_xform = &xform->cipher; 3034 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 3035 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3036 session->ext_params.aead_ctxt.auth_cipher_text = true; 3037 cipher_xform = &xform->cipher; 3038 auth_xform = &xform->next->auth; 3039 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 3040 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3041 session->ext_params.aead_ctxt.auth_cipher_text = false; 3042 cipher_xform = &xform->next->cipher; 3043 auth_xform = &xform->auth; 3044 } else { 3045 DPAA2_SEC_ERR("Invalid crypto type"); 3046 return -EINVAL; 3047 } 3048 3049 session->ctxt_type = DPAA2_SEC_PDCP; 3050 if (cipher_xform) { 3051 session->cipher_key.data = rte_zmalloc(NULL, 3052 cipher_xform->key.length, 3053 RTE_CACHE_LINE_SIZE); 3054 if (session->cipher_key.data == NULL && 3055 cipher_xform->key.length > 0) { 3056 DPAA2_SEC_ERR("No Memory for cipher key"); 3057 rte_free(priv); 3058 return -ENOMEM; 3059 } 3060 session->cipher_key.length = cipher_xform->key.length; 3061 memcpy(session->cipher_key.data, cipher_xform->key.data, 3062 cipher_xform->key.length); 3063 session->dir = 3064 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
3065 DIR_ENC : DIR_DEC; 3066 session->cipher_alg = cipher_xform->algo; 3067 } else { 3068 session->cipher_key.data = NULL; 3069 session->cipher_key.length = 0; 3070 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 3071 session->dir = DIR_ENC; 3072 } 3073 3074 session->pdcp.domain = pdcp_xform->domain; 3075 session->pdcp.bearer = pdcp_xform->bearer; 3076 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 3077 session->pdcp.sn_size = pdcp_xform->sn_size; 3078 session->pdcp.hfn = pdcp_xform->hfn; 3079 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 3080 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd; 3081 /* hfn ovd offset location is stored in iv.offset value */ 3082 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset; 3083 3084 cipherdata.key = (size_t)session->cipher_key.data; 3085 cipherdata.keylen = session->cipher_key.length; 3086 cipherdata.key_enc_flags = 0; 3087 cipherdata.key_type = RTA_DATA_IMM; 3088 3089 switch (session->cipher_alg) { 3090 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 3091 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 3092 break; 3093 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 3094 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 3095 break; 3096 case RTE_CRYPTO_CIPHER_AES_CTR: 3097 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 3098 break; 3099 case RTE_CRYPTO_CIPHER_NULL: 3100 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 3101 break; 3102 default: 3103 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 3104 session->cipher_alg); 3105 goto out; 3106 } 3107 3108 if (auth_xform) { 3109 session->auth_key.data = rte_zmalloc(NULL, 3110 auth_xform->key.length, 3111 RTE_CACHE_LINE_SIZE); 3112 if (!session->auth_key.data && 3113 auth_xform->key.length > 0) { 3114 DPAA2_SEC_ERR("No Memory for auth key"); 3115 rte_free(session->cipher_key.data); 3116 rte_free(priv); 3117 return -ENOMEM; 3118 } 3119 session->auth_key.length = auth_xform->key.length; 3120 memcpy(session->auth_key.data, auth_xform->key.data, 3121 auth_xform->key.length); 3122 session->auth_alg = auth_xform->algo; 3123 } else { 3124 session->auth_key.data = NULL; 3125 session->auth_key.length = 0; 3126 session->auth_alg = 0; 3127 } 3128 authdata.key = (size_t)session->auth_key.data; 3129 authdata.keylen = session->auth_key.length; 3130 authdata.key_enc_flags = 0; 3131 authdata.key_type = RTA_DATA_IMM; 3132 3133 if (session->auth_alg) { 3134 switch (session->auth_alg) { 3135 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 3136 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 3137 break; 3138 case RTE_CRYPTO_AUTH_ZUC_EIA3: 3139 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 3140 break; 3141 case RTE_CRYPTO_AUTH_AES_CMAC: 3142 authdata.algtype = PDCP_AUTH_TYPE_AES; 3143 break; 3144 case RTE_CRYPTO_AUTH_NULL: 3145 authdata.algtype = PDCP_AUTH_TYPE_NULL; 3146 break; 3147 default: 3148 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 3149 session->auth_alg); 3150 goto out; 3151 } 3152 3153 p_authdata = &authdata; 3154 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3155 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane"); 3156 goto out; 3157 } 3158 3159 if (rta_inline_pdcp_query(authdata.algtype, 3160 cipherdata.algtype, 3161 session->pdcp.sn_size, 3162 session->pdcp.hfn_ovd)) { 3163 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 3164 cipherdata.key_type = RTA_DATA_PTR; 3165 } 3166 3167 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 3168 if (session->dir == DIR_ENC) 3169 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 3170 priv->flc_desc[0].desc, 1, swap, 3171 pdcp_xform->hfn, 3172 session->pdcp.sn_size, 3173 pdcp_xform->bearer, 3174
pdcp_xform->pkt_dir, 3175 pdcp_xform->hfn_threshold, 3176 &cipherdata, &authdata, 3177 0); 3178 else if (session->dir == DIR_DEC) 3179 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 3180 priv->flc_desc[0].desc, 1, swap, 3181 pdcp_xform->hfn, 3182 session->pdcp.sn_size, 3183 pdcp_xform->bearer, 3184 pdcp_xform->pkt_dir, 3185 pdcp_xform->hfn_threshold, 3186 &cipherdata, &authdata, 3187 0); 3188 } else { 3189 if (session->dir == DIR_ENC) 3190 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 3191 priv->flc_desc[0].desc, 1, swap, 3192 session->pdcp.sn_size, 3193 pdcp_xform->hfn, 3194 pdcp_xform->bearer, 3195 pdcp_xform->pkt_dir, 3196 pdcp_xform->hfn_threshold, 3197 &cipherdata, p_authdata, 0); 3198 else if (session->dir == DIR_DEC) 3199 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 3200 priv->flc_desc[0].desc, 1, swap, 3201 session->pdcp.sn_size, 3202 pdcp_xform->hfn, 3203 pdcp_xform->bearer, 3204 pdcp_xform->pkt_dir, 3205 pdcp_xform->hfn_threshold, 3206 &cipherdata, p_authdata, 0); 3207 } 3208 3209 if (bufsize < 0) { 3210 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3211 goto out; 3212 } 3213 3214 /* Enable the stashing control bit */ 3215 DPAA2_SET_FLC_RSC(flc); 3216 flc->word2_rflc_31_0 = lower_32_bits( 3217 (size_t)&(((struct dpaa2_sec_qp *) 3218 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3219 flc->word3_rflc_63_32 = upper_32_bits( 3220 (size_t)&(((struct dpaa2_sec_qp *) 3221 dev->data->queue_pairs[0])->rx_vq)); 3222 3223 flc->word1_sdl = (uint8_t)bufsize; 3224 3225 /* TODO - check the perf impact or 3226 * align as per descriptor type 3227 * Set EWS bit i.e. enable write-safe 3228 * DPAA2_SET_FLC_EWS(flc); 3229 */ 3230 3231 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3232 DPAA2_SET_FLC_REUSE_BS(flc); 3233 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3234 DPAA2_SET_FLC_REUSE_FF(flc); 3235 3236 session->ctxt = priv; 3237 3238 return 0; 3239 out: 3240 rte_free(session->auth_key.data); 3241 rte_free(session->cipher_key.data); 3242 rte_free(priv); 3243 return -EINVAL; 3244 } 3245 3246 static int 3247 dpaa2_sec_security_session_create(void *dev, 3248 struct rte_security_session_conf *conf, 3249 struct rte_security_session *sess, 3250 struct rte_mempool *mempool) 3251 { 3252 void *sess_private_data; 3253 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 3254 int ret; 3255 3256 if (rte_mempool_get(mempool, &sess_private_data)) { 3257 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3258 return -ENOMEM; 3259 } 3260 3261 switch (conf->protocol) { 3262 case RTE_SECURITY_PROTOCOL_IPSEC: 3263 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 3264 sess_private_data); 3265 break; 3266 case RTE_SECURITY_PROTOCOL_MACSEC: 3267 return -ENOTSUP; 3268 case RTE_SECURITY_PROTOCOL_PDCP: 3269 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 3270 sess_private_data); 3271 break; 3272 default: 3273 return -EINVAL; 3274 } 3275 if (ret != 0) { 3276 DPAA2_SEC_ERR("Failed to configure session parameters"); 3277 /* Return session to mempool */ 3278 rte_mempool_put(mempool, sess_private_data); 3279 return ret; 3280 } 3281 3282 set_sec_session_private_data(sess, sess_private_data); 3283 3284 return ret; 3285 } 3286 3287 /** Clear the memory of session so it doesn't leave key material behind */ 3288 static int 3289 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 3290 struct rte_security_session *sess) 3291 { 3292 PMD_INIT_FUNC_TRACE(); 3293 void *sess_priv = get_sec_session_private_data(sess); 3294 3295 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3296 3297 if 
(sess_priv) { 3298 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3299 3300 rte_free(s->ctxt); 3301 rte_free(s->cipher_key.data); 3302 rte_free(s->auth_key.data); 3303 memset(s, 0, sizeof(dpaa2_sec_session)); 3304 set_sec_session_private_data(sess, NULL); 3305 rte_mempool_put(sess_mp, sess_priv); 3306 } 3307 return 0; 3308 } 3309 #endif 3310 static int 3311 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3312 struct rte_crypto_sym_xform *xform, 3313 struct rte_cryptodev_sym_session *sess, 3314 struct rte_mempool *mempool) 3315 { 3316 void *sess_private_data; 3317 int ret; 3318 3319 if (rte_mempool_get(mempool, &sess_private_data)) { 3320 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3321 return -ENOMEM; 3322 } 3323 3324 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3325 if (ret != 0) { 3326 DPAA2_SEC_ERR("Failed to configure session parameters"); 3327 /* Return session to mempool */ 3328 rte_mempool_put(mempool, sess_private_data); 3329 return ret; 3330 } 3331 3332 set_sym_session_private_data(sess, dev->driver_id, 3333 sess_private_data); 3334 3335 return 0; 3336 } 3337 3338 /** Clear the memory of session so it doesn't leave key material behind */ 3339 static void 3340 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3341 struct rte_cryptodev_sym_session *sess) 3342 { 3343 PMD_INIT_FUNC_TRACE(); 3344 uint8_t index = dev->driver_id; 3345 void *sess_priv = get_sym_session_private_data(sess, index); 3346 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3347 3348 if (sess_priv) { 3349 rte_free(s->ctxt); 3350 rte_free(s->cipher_key.data); 3351 rte_free(s->auth_key.data); 3352 memset(s, 0, sizeof(dpaa2_sec_session)); 3353 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3354 set_sym_session_private_data(sess, index, NULL); 3355 rte_mempool_put(sess_mp, sess_priv); 3356 } 3357 } 3358 3359 static int 3360 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3361 struct rte_cryptodev_config *config __rte_unused) 3362 { 3363 PMD_INIT_FUNC_TRACE(); 3364 3365 return 0; 3366 } 3367 3368 static int 3369 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3370 { 3371 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3372 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3373 struct dpseci_attr attr; 3374 struct dpaa2_queue *dpaa2_q; 3375 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3376 dev->data->queue_pairs; 3377 struct dpseci_rx_queue_attr rx_attr; 3378 struct dpseci_tx_queue_attr tx_attr; 3379 int ret, i; 3380 3381 PMD_INIT_FUNC_TRACE(); 3382 3383 memset(&attr, 0, sizeof(struct dpseci_attr)); 3384 3385 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3386 if (ret) { 3387 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3388 priv->hw_id); 3389 goto get_attr_failure; 3390 } 3391 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3392 if (ret) { 3393 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3394 goto get_attr_failure; 3395 } 3396 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3397 dpaa2_q = &qp[i]->rx_vq; 3398 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3399 &rx_attr); 3400 dpaa2_q->fqid = rx_attr.fqid; 3401 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3402 } 3403 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3404 dpaa2_q = &qp[i]->tx_vq; 3405 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3406 &tx_attr); 3407 dpaa2_q->fqid = tx_attr.fqid; 3408 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3409 
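/* The Rx/Tx FQIDs cached in these loops identify the DPSECI frame queues that the data-path enqueue/dequeue routines address once the device is started. */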
} 3410 3411 return 0; 3412 get_attr_failure: 3413 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3414 return -1; 3415 } 3416 3417 static void 3418 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 3419 { 3420 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3421 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3422 int ret; 3423 3424 PMD_INIT_FUNC_TRACE(); 3425 3426 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 3427 if (ret) { 3428 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 3429 priv->hw_id); 3430 return; 3431 } 3432 3433 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 3434 if (ret < 0) { 3435 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret); 3436 return; 3437 } 3438 } 3439 3440 static int 3441 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 3442 { 3443 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3444 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3445 int ret; 3446 3447 PMD_INIT_FUNC_TRACE(); 3448 3449 /* Function is reverse of dpaa2_sec_dev_init. 3450 * It does the following: 3451 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3452 * 2. Close the DPSECI device 3453 * 3. Free the allocated resources. 3454 */ 3455 3456 /* Close the device at underlying layer */ 3457 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3458 if (ret) { 3459 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3460 return -1; 3461 } 3462 3463 /* Free the allocated memory for the device private data and the dpseci object */ 3464 priv->hw = NULL; 3465 rte_free(dpseci); 3466 3467 return 0; 3468 } 3469 3470 static void 3471 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3472 struct rte_cryptodev_info *info) 3473 { 3474 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3475 3476 PMD_INIT_FUNC_TRACE(); 3477 if (info != NULL) { 3478 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3479 info->feature_flags = dev->feature_flags; 3480 info->capabilities = dpaa2_sec_capabilities; 3481 /* No limit of number of sessions */ 3482 info->sym.max_nb_sessions = 0; 3483 info->driver_id = cryptodev_driver_id; 3484 } 3485 } 3486 3487 static 3488 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3489 struct rte_cryptodev_stats *stats) 3490 { 3491 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3492 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3493 struct dpseci_sec_counters counters = {0}; 3494 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3495 dev->data->queue_pairs; 3496 int ret, i; 3497 3498 PMD_INIT_FUNC_TRACE(); 3499 if (stats == NULL) { 3500 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3501 return; 3502 } 3503 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3504 if (qp[i] == NULL) { 3505 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3506 continue; 3507 } 3508 3509 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3510 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3511 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3512 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3513 } 3514 3515 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 3516 &counters); 3517 if (ret) { 3518 DPAA2_SEC_ERR("SEC counters failed"); 3519 } else { 3520 DPAA2_SEC_INFO("dpseci hardware stats:" 3521 "\n\tNum of Requests Dequeued = %" PRIu64 3522 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3523 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3524 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3525 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3526 "\n\tNum of Inbound
Bytes Decrypted = %" PRIu64 3527 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3528 counters.dequeued_requests, 3529 counters.ob_enc_requests, 3530 counters.ib_dec_requests, 3531 counters.ob_enc_bytes, 3532 counters.ob_prot_bytes, 3533 counters.ib_dec_bytes, 3534 counters.ib_valid_bytes); 3535 } 3536 } 3537 3538 static 3539 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3540 { 3541 int i; 3542 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3543 (dev->data->queue_pairs); 3544 3545 PMD_INIT_FUNC_TRACE(); 3546 3547 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3548 if (qp[i] == NULL) { 3549 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3550 continue; 3551 } 3552 qp[i]->tx_vq.rx_pkts = 0; 3553 qp[i]->tx_vq.tx_pkts = 0; 3554 qp[i]->tx_vq.err_pkts = 0; 3555 qp[i]->rx_vq.rx_pkts = 0; 3556 qp[i]->rx_vq.tx_pkts = 0; 3557 qp[i]->rx_vq.err_pkts = 0; 3558 } 3559 } 3560 3561 static void __rte_hot 3562 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3563 const struct qbman_fd *fd, 3564 const struct qbman_result *dq, 3565 struct dpaa2_queue *rxq, 3566 struct rte_event *ev) 3567 { 3568 /* Prefetching mbuf */ 3569 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3570 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3571 3572 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3573 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3574 3575 ev->flow_id = rxq->ev.flow_id; 3576 ev->sub_event_type = rxq->ev.sub_event_type; 3577 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3578 ev->op = RTE_EVENT_OP_NEW; 3579 ev->sched_type = rxq->ev.sched_type; 3580 ev->queue_id = rxq->ev.queue_id; 3581 ev->priority = rxq->ev.priority; 3582 ev->event_ptr = sec_fd_to_mbuf(fd); 3583 3584 qbman_swp_dqrr_consume(swp, dq); 3585 } 3586 static void 3587 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 3588 const struct qbman_fd *fd, 3589 const struct qbman_result *dq, 3590 struct dpaa2_queue *rxq, 3591 struct rte_event *ev) 3592 { 3593 uint8_t dqrr_index; 3594 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3595 /* Prefetching mbuf */ 3596 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3597 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3598 3599 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3600 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3601 3602 ev->flow_id = rxq->ev.flow_id; 3603 ev->sub_event_type = rxq->ev.sub_event_type; 3604 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3605 ev->op = RTE_EVENT_OP_NEW; 3606 ev->sched_type = rxq->ev.sched_type; 3607 ev->queue_id = rxq->ev.queue_id; 3608 ev->priority = rxq->ev.priority; 3609 3610 ev->event_ptr = sec_fd_to_mbuf(fd); 3611 dqrr_index = qbman_get_dqrr_idx(dq); 3612 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3613 DPAA2_PER_LCORE_DQRR_SIZE++; 3614 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3615 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3616 } 3617 3618 int 3619 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3620 int qp_id, 3621 struct dpaa2_dpcon_dev *dpcon, 3622 const struct rte_event *event) 3623 { 3624 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3625 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3626 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3627 struct dpseci_rx_queue_cfg cfg; 3628 uint8_t priority; 3629 int ret; 3630 3631 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3632 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3633 else if 
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.sym_session_get_size = dpaa2_sec_sym_session_get_size,
	.sym_session_configure = dpaa2_sec_sym_session_configure,
	.sym_session_clear = dpaa2_sec_sym_session_clear,
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
#endif

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[30];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for the primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -ENOMEM;
	}
	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributes: Error = %x",
			retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
		 getpid(), cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}
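
/*
 * Illustrative sketch (not part of the driver): the feature flags set in
 * dpaa2_sec_dev_init() and the limits reported by dpaa2_sec_dev_infos_get()
 * are what an application sees through rte_cryptodev_info_get(). The helper
 * name and the dev_id parameter are hypothetical; rte_cryptodev_info_get()
 * is the real API.
 */
static int __rte_unused
dpaa2_sec_example_check_security(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);

	/* Non-zero when the device advertises rte_security protocol offload */
	return (info.feature_flags & RTE_CRYPTODEV_FF_SECURITY) != 0;
}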
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	if (dpaa2_svr_family == SVR_LX2160A)
		rta_set_sec_era(RTA_SEC_ERA_10);
	else
		rta_set_sec_era(RTA_SEC_ERA_8);

	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
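
/*
 * Illustrative sketch (not part of the driver): each DPSECI object probed
 * above is registered as a cryptodev named "dpsec-<object id>", so an
 * application can resolve its dev_id by name. The "dpsec-0" value is
 * hypothetical; rte_cryptodev_get_dev_id() is the real API.
 */
static int __rte_unused
dpaa2_sec_example_lookup(void)
{
	/* Returns a negative value if no such device was probed */
	return rte_cryptodev_get_dev_id("dpsec-0");
}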
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
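
/*
 * Note (illustrative, not part of the driver): the log type registered above
 * as "pmd.crypto.dpaa2" defaults to NOTICE; it can be raised to debug at
 * runtime with an EAL option such as:
 *
 *	--log-level=pmd.crypto.dpaa2,8
 *
 * The exact option syntax may vary between DPDK releases.
 */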