/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2020 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

static uint8_t cryptodev_driver_id;

#ifdef RTE_LIBRTE_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
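	/* o/p segs: walk all but the last segment; the last SGE is sized
	 * from buf_len below so the protocol engine has room to append
	 * extra data (e.g. an IPsec trailer).
	 */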
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* use buf_len for the last buffer so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
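	/* For compound FDs, SEC takes the job's input size from the FD
	 * length, so mirror the input FLE length here.
	 */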
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per-packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			"iv-len=%d data_off: 0x%x\n",
			sym_op->aead.data.offset,
			sym_op->aead.data.length,
			sess->digest_length,
			sess->iv.length,
			sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;
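	/* Output frame carries the payload plus the appended ICV on
	 * encryption; on decryption it is the payload only.
	 */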
	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
			(sym_op->aead.data.length + sess->iv.length + auth_only_len +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the
	 * FD ADDR to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
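	/* One fle_pool buffer (FLE_POOL_BUF_SIZE bytes) holds the
	 * bookkeeping FLE (op + ctxt), the output and input FLEs and the
	 * SGEs used below.
	 */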
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			"iv-len=%d data_off: 0x%x\n",
			sym_op->aead.data.offset,
			sym_op->aead.data.length,
			sess->digest_length,
			sess->iv.length,
			sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
			(sym_op->aead.data.length + sess->iv.length + auth_only_len +
			 sess->digest_length);
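	/* Input frame is IV + AAD (if any) + payload; on decryption the
	 * received ICV is appended so SEC can verify it.
	 */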
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
			sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
			sym_op->cipher.data.length - auth_hdr_len;
	/* auth-only length: header bytes in the low word, trailer bytes
	 * in the high word
	 */
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;
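	/* Output covers only the cipher region; on encryption the newly
	 * computed ICV is appended right after it.
	 */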
	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
			sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
			sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);
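	/* Input covers the whole authenticated region plus the IV; on
	 * decryption the received ICV is appended for verification.
	 */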
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->digest_length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		/* lengths/offsets are given in bits and must be byte
		 * aligned
		 */
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		/* convert bit length/offset to bytes */
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
				sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
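	/* Fast path: the whole auth region fits in the first segment */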
	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
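	/* Bookkeeping FLE: stash the op pointer and session ctxt so that
	 * sec_fd_to_mbuf() can recover them via (fle - 1).
	 */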
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
				sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
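		/* SNOW3G/ZUC offsets and lengths are specified in bits;
		 * SEC needs them byte aligned, converted to bytes below.
		 */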
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIBRTE_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -ENOTSUP;

	if (!sess)
		return -EINVAL;
	/* If any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
				dpaa2_eqcr_size : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index =
						(*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &=
						~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn =
						DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop],
					&flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

#ifdef RTE_LIBRTE_SECURITY
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* build_proto_fd() stashed the op pointer in buf_iova and saved
	 * the real IOVA in the digest field; restore both here.
	 */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
			op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIBRTE_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIBRTE_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);

		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
			1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}
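		/* Each valid dequeue entry carries one FD; convert it back
		 * into the originating crypto op.
		 */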
		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();
	/* If the qp is already set up, nothing to do */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -ENOMEM;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
		return -ENOMEM;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
		return -ENOMEM;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, ret = 0;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->ctxt_type = DPAA2_SEC_CIPHER;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;
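	/* Build the per-algorithm shared descriptor via the RTA
	 * cnstr_shdsc_* helpers.
	 */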
1860 DIR_ENC : DIR_DEC; 1861 1862 switch (xform->cipher.algo) { 1863 case RTE_CRYPTO_CIPHER_AES_CBC: 1864 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1865 cipherdata.algmode = OP_ALG_AAI_CBC; 1866 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1867 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1868 SHR_NEVER, &cipherdata, 1869 session->iv.length, 1870 session->dir); 1871 break; 1872 case RTE_CRYPTO_CIPHER_3DES_CBC: 1873 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1874 cipherdata.algmode = OP_ALG_AAI_CBC; 1875 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1876 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1877 SHR_NEVER, &cipherdata, 1878 session->iv.length, 1879 session->dir); 1880 break; 1881 case RTE_CRYPTO_CIPHER_DES_CBC: 1882 cipherdata.algtype = OP_ALG_ALGSEL_DES; 1883 cipherdata.algmode = OP_ALG_AAI_CBC; 1884 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 1885 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1886 SHR_NEVER, &cipherdata, 1887 session->iv.length, 1888 session->dir); 1889 break; 1890 case RTE_CRYPTO_CIPHER_AES_CTR: 1891 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1892 cipherdata.algmode = OP_ALG_AAI_CTR; 1893 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1894 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1895 SHR_NEVER, &cipherdata, 1896 session->iv.length, 1897 session->dir); 1898 break; 1899 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1900 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8; 1901 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; 1902 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0, 1903 &cipherdata, 1904 session->dir); 1905 break; 1906 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1907 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE; 1908 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3; 1909 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0, 1910 &cipherdata, 1911 session->dir); 1912 break; 1913 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1914 case RTE_CRYPTO_CIPHER_AES_F8: 1915 case RTE_CRYPTO_CIPHER_AES_ECB: 1916 case RTE_CRYPTO_CIPHER_3DES_ECB: 1917 case RTE_CRYPTO_CIPHER_3DES_CTR: 1918 case RTE_CRYPTO_CIPHER_AES_XTS: 1919 case RTE_CRYPTO_CIPHER_ARC4: 1920 case RTE_CRYPTO_CIPHER_NULL: 1921 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1922 xform->cipher.algo); 1923 ret = -ENOTSUP; 1924 goto error_out; 1925 default: 1926 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1927 xform->cipher.algo); 1928 ret = -ENOTSUP; 1929 goto error_out; 1930 } 1931 1932 if (bufsize < 0) { 1933 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1934 ret = -EINVAL; 1935 goto error_out; 1936 } 1937 1938 flc->word1_sdl = (uint8_t)bufsize; 1939 session->ctxt = priv; 1940 1941 #ifdef CAAM_DESC_DEBUG 1942 int i; 1943 for (i = 0; i < bufsize; i++) 1944 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1945 #endif 1946 return ret; 1947 1948 error_out: 1949 rte_free(session->cipher_key.data); 1950 rte_free(priv); 1951 return ret; 1952 } 1953 1954 static int 1955 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1956 struct rte_crypto_sym_xform *xform, 1957 dpaa2_sec_session *session) 1958 { 1959 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1960 struct alginfo authdata; 1961 int bufsize, ret = 0; 1962 struct ctxt_priv *priv; 1963 struct sec_flow_context *flc; 1964 1965 PMD_INIT_FUNC_TRACE(); 1966 1967 /* For SEC AUTH three descriptors are required for various stages */ 1968 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1969 sizeof(struct ctxt_priv) + 3 * 1970 sizeof(struct sec_flc_desc), 1971 
RTE_CACHE_LINE_SIZE); 1972 if (priv == NULL) { 1973 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1974 return -ENOMEM; 1975 } 1976 1977 priv->fle_pool = dev_priv->fle_pool; 1978 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1979 1980 session->ctxt_type = DPAA2_SEC_AUTH; 1981 session->auth_key.length = xform->auth.key.length; 1982 if (xform->auth.key.length) { 1983 session->auth_key.data = rte_zmalloc(NULL, 1984 xform->auth.key.length, 1985 RTE_CACHE_LINE_SIZE); 1986 if (session->auth_key.data == NULL) { 1987 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1988 rte_free(priv); 1989 return -ENOMEM; 1990 } 1991 memcpy(session->auth_key.data, xform->auth.key.data, 1992 xform->auth.key.length); 1993 authdata.key = (size_t)session->auth_key.data; 1994 authdata.key_enc_flags = 0; 1995 authdata.key_type = RTA_DATA_IMM; 1996 } 1997 authdata.keylen = session->auth_key.length; 1998 1999 session->digest_length = xform->auth.digest_length; 2000 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 2001 DIR_ENC : DIR_DEC; 2002 2003 switch (xform->auth.algo) { 2004 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2005 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2006 authdata.algmode = OP_ALG_AAI_HMAC; 2007 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2008 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2009 1, 0, SHR_NEVER, &authdata, 2010 !session->dir, 2011 session->digest_length); 2012 break; 2013 case RTE_CRYPTO_AUTH_MD5_HMAC: 2014 authdata.algtype = OP_ALG_ALGSEL_MD5; 2015 authdata.algmode = OP_ALG_AAI_HMAC; 2016 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2017 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2018 1, 0, SHR_NEVER, &authdata, 2019 !session->dir, 2020 session->digest_length); 2021 break; 2022 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2023 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2024 authdata.algmode = OP_ALG_AAI_HMAC; 2025 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2026 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2027 1, 0, SHR_NEVER, &authdata, 2028 !session->dir, 2029 session->digest_length); 2030 break; 2031 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2032 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2033 authdata.algmode = OP_ALG_AAI_HMAC; 2034 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2035 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2036 1, 0, SHR_NEVER, &authdata, 2037 !session->dir, 2038 session->digest_length); 2039 break; 2040 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2041 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2042 authdata.algmode = OP_ALG_AAI_HMAC; 2043 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2044 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2045 1, 0, SHR_NEVER, &authdata, 2046 !session->dir, 2047 session->digest_length); 2048 break; 2049 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2050 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2051 authdata.algmode = OP_ALG_AAI_HMAC; 2052 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2053 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 2054 1, 0, SHR_NEVER, &authdata, 2055 !session->dir, 2056 session->digest_length); 2057 break; 2058 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2059 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9; 2060 authdata.algmode = OP_ALG_AAI_F9; 2061 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2; 2062 session->iv.offset = xform->auth.iv.offset; 2063 session->iv.length = xform->auth.iv.length; 2064 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc, 2065 1, 0, &authdata, 2066 !session->dir, 2067 session->digest_length); 2068 
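/* Pattern used throughout these init routines: each supported algorithm maps
 * to a CAAM algorithm selector (OP_ALG_ALGSEL_*) plus an operating mode
 * (OP_ALG_AAI_*), and an RTA cnstr_shdsc_*() helper then writes a complete
 * shared descriptor into priv->flc_desc[].desc. The helper returns the
 * descriptor length in 4-byte words (stored into flc->word1_sdl further
 * down), or a negative value if the descriptor could not be built.
 */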
break;
2069 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2070 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2071 authdata.algmode = OP_ALG_AAI_F9;
2072 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2073 session->iv.offset = xform->auth.iv.offset;
2074 session->iv.length = xform->auth.iv.length;
2075 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2076 1, 0, &authdata,
2077 !session->dir,
2078 session->digest_length);
2079 break;
2080 case RTE_CRYPTO_AUTH_SHA1:
2081 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2082 authdata.algmode = OP_ALG_AAI_HASH;
2083 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2084 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2085 1, 0, SHR_NEVER, &authdata,
2086 !session->dir,
2087 session->digest_length);
2088 break;
2089 case RTE_CRYPTO_AUTH_MD5:
2090 authdata.algtype = OP_ALG_ALGSEL_MD5;
2091 authdata.algmode = OP_ALG_AAI_HASH;
2092 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2093 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2094 1, 0, SHR_NEVER, &authdata,
2095 !session->dir,
2096 session->digest_length);
2097 break;
2098 case RTE_CRYPTO_AUTH_SHA256:
2099 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2100 authdata.algmode = OP_ALG_AAI_HASH;
2101 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2102 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2103 1, 0, SHR_NEVER, &authdata,
2104 !session->dir,
2105 session->digest_length);
2106 break;
2107 case RTE_CRYPTO_AUTH_SHA384:
2108 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2109 authdata.algmode = OP_ALG_AAI_HASH;
2110 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2111 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2112 1, 0, SHR_NEVER, &authdata,
2113 !session->dir,
2114 session->digest_length);
2115 break;
2116 case RTE_CRYPTO_AUTH_SHA512:
2117 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2118 authdata.algmode = OP_ALG_AAI_HASH;
2119 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2120 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2121 1, 0, SHR_NEVER, &authdata,
2122 !session->dir,
2123 session->digest_length);
2124 break;
2125 case RTE_CRYPTO_AUTH_SHA224:
2126 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2127 authdata.algmode = OP_ALG_AAI_HASH;
2128 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2129 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2130 1, 0, SHR_NEVER, &authdata,
2131 !session->dir,
2132 session->digest_length);
2133 break;
2134 case RTE_CRYPTO_AUTH_AES_GMAC:
2135 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2136 case RTE_CRYPTO_AUTH_AES_CMAC:
2137 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2138 case RTE_CRYPTO_AUTH_KASUMI_F9:
2139 case RTE_CRYPTO_AUTH_NULL:
2140 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2141 xform->auth.algo);
2142 ret = -ENOTSUP;
2143 goto error_out;
2144 default:
2145 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2146 xform->auth.algo);
2147 ret = -ENOTSUP;
2148 goto error_out;
2149 }
2150
2151 if (bufsize < 0) {
2152 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2153 ret = -EINVAL;
2154 goto error_out;
2155 }
2156
2157 flc->word1_sdl = (uint8_t)bufsize;
2158 session->ctxt = priv;
2159 #ifdef CAAM_DESC_DEBUG
2160 int i;
2161 for (i = 0; i < bufsize; i++)
2162 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2163 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2164 #endif
2165
2166 return ret;
2167
2168 error_out:
2169 rte_free(session->auth_key.data);
2170 rte_free(priv);
2171 return ret;
2172 }
2173
2174 static int
2175 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2176 struct rte_crypto_sym_xform *xform,
2177 dpaa2_sec_session *session)
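/* dpaa2_sec_aead_init(): builds a single-pass AES-GCM shared descriptor.
 * Only RTE_CRYPTO_AEAD_AES_GCM is accepted here; rta_inline_query() is used
 * below to decide whether the key still fits immediately inside the
 * descriptor (RTA_DATA_IMM) or must be referenced by pointer (RTA_DATA_PTR).
 */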
2178 { 2179 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 2180 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2181 struct alginfo aeaddata; 2182 int bufsize; 2183 struct ctxt_priv *priv; 2184 struct sec_flow_context *flc; 2185 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 2186 int err, ret = 0; 2187 2188 PMD_INIT_FUNC_TRACE(); 2189 2190 /* Set IV parameters */ 2191 session->iv.offset = aead_xform->iv.offset; 2192 session->iv.length = aead_xform->iv.length; 2193 session->ctxt_type = DPAA2_SEC_AEAD; 2194 2195 /* For SEC AEAD only one descriptor is required */ 2196 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2197 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2198 RTE_CACHE_LINE_SIZE); 2199 if (priv == NULL) { 2200 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2201 return -ENOMEM; 2202 } 2203 2204 priv->fle_pool = dev_priv->fle_pool; 2205 flc = &priv->flc_desc[0].flc; 2206 2207 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2208 RTE_CACHE_LINE_SIZE); 2209 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2210 DPAA2_SEC_ERR("No Memory for aead key"); 2211 rte_free(priv); 2212 return -ENOMEM; 2213 } 2214 memcpy(session->aead_key.data, aead_xform->key.data, 2215 aead_xform->key.length); 2216 2217 session->digest_length = aead_xform->digest_length; 2218 session->aead_key.length = aead_xform->key.length; 2219 ctxt->auth_only_len = aead_xform->aad_length; 2220 2221 aeaddata.key = (size_t)session->aead_key.data; 2222 aeaddata.keylen = session->aead_key.length; 2223 aeaddata.key_enc_flags = 0; 2224 aeaddata.key_type = RTA_DATA_IMM; 2225 2226 switch (aead_xform->algo) { 2227 case RTE_CRYPTO_AEAD_AES_GCM: 2228 aeaddata.algtype = OP_ALG_ALGSEL_AES; 2229 aeaddata.algmode = OP_ALG_AAI_GCM; 2230 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2231 break; 2232 case RTE_CRYPTO_AEAD_AES_CCM: 2233 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 2234 aead_xform->algo); 2235 ret = -ENOTSUP; 2236 goto error_out; 2237 default: 2238 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2239 aead_xform->algo); 2240 ret = -ENOTSUP; 2241 goto error_out; 2242 } 2243 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
2244 DIR_ENC : DIR_DEC; 2245 2246 priv->flc_desc[0].desc[0] = aeaddata.keylen; 2247 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2248 DESC_JOB_IO_LEN, 2249 (unsigned int *)priv->flc_desc[0].desc, 2250 &priv->flc_desc[0].desc[1], 1); 2251 2252 if (err < 0) { 2253 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2254 ret = -EINVAL; 2255 goto error_out; 2256 } 2257 if (priv->flc_desc[0].desc[1] & 1) { 2258 aeaddata.key_type = RTA_DATA_IMM; 2259 } else { 2260 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 2261 aeaddata.key_type = RTA_DATA_PTR; 2262 } 2263 priv->flc_desc[0].desc[0] = 0; 2264 priv->flc_desc[0].desc[1] = 0; 2265 2266 if (session->dir == DIR_ENC) 2267 bufsize = cnstr_shdsc_gcm_encap( 2268 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2269 &aeaddata, session->iv.length, 2270 session->digest_length); 2271 else 2272 bufsize = cnstr_shdsc_gcm_decap( 2273 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 2274 &aeaddata, session->iv.length, 2275 session->digest_length); 2276 if (bufsize < 0) { 2277 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2278 ret = -EINVAL; 2279 goto error_out; 2280 } 2281 2282 flc->word1_sdl = (uint8_t)bufsize; 2283 session->ctxt = priv; 2284 #ifdef CAAM_DESC_DEBUG 2285 int i; 2286 for (i = 0; i < bufsize; i++) 2287 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 2288 i, priv->flc_desc[0].desc[i]); 2289 #endif 2290 return ret; 2291 2292 error_out: 2293 rte_free(session->aead_key.data); 2294 rte_free(priv); 2295 return ret; 2296 } 2297 2298 2299 static int 2300 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 2301 struct rte_crypto_sym_xform *xform, 2302 dpaa2_sec_session *session) 2303 { 2304 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2305 struct alginfo authdata, cipherdata; 2306 int bufsize; 2307 struct ctxt_priv *priv; 2308 struct sec_flow_context *flc; 2309 struct rte_crypto_cipher_xform *cipher_xform; 2310 struct rte_crypto_auth_xform *auth_xform; 2311 int err, ret = 0; 2312 2313 PMD_INIT_FUNC_TRACE(); 2314 2315 if (session->ext_params.aead_ctxt.auth_cipher_text) { 2316 cipher_xform = &xform->cipher; 2317 auth_xform = &xform->next->auth; 2318 session->ctxt_type = 2319 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2320 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 2321 } else { 2322 cipher_xform = &xform->next->cipher; 2323 auth_xform = &xform->auth; 2324 session->ctxt_type = 2325 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2326 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 2327 } 2328 2329 /* Set IV parameters */ 2330 session->iv.offset = cipher_xform->iv.offset; 2331 session->iv.length = cipher_xform->iv.length; 2332 2333 /* For SEC AEAD only one descriptor is required */ 2334 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2335 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 2336 RTE_CACHE_LINE_SIZE); 2337 if (priv == NULL) { 2338 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2339 return -ENOMEM; 2340 } 2341 2342 priv->fle_pool = dev_priv->fle_pool; 2343 flc = &priv->flc_desc[0].flc; 2344 2345 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2346 RTE_CACHE_LINE_SIZE); 2347 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2348 DPAA2_SEC_ERR("No Memory for cipher key"); 2349 rte_free(priv); 2350 return -ENOMEM; 2351 } 2352 session->cipher_key.length = cipher_xform->key.length; 2353 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2354 RTE_CACHE_LINE_SIZE); 2355 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2356 DPAA2_SEC_ERR("No Memory for auth key"); 2357 rte_free(session->cipher_key.data); 2358 rte_free(priv); 2359 return -ENOMEM; 2360 } 2361 session->auth_key.length = auth_xform->key.length; 2362 memcpy(session->cipher_key.data, cipher_xform->key.data, 2363 cipher_xform->key.length); 2364 memcpy(session->auth_key.data, auth_xform->key.data, 2365 auth_xform->key.length); 2366 2367 authdata.key = (size_t)session->auth_key.data; 2368 authdata.keylen = session->auth_key.length; 2369 authdata.key_enc_flags = 0; 2370 authdata.key_type = RTA_DATA_IMM; 2371 2372 session->digest_length = auth_xform->digest_length; 2373 2374 switch (auth_xform->algo) { 2375 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2376 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2377 authdata.algmode = OP_ALG_AAI_HMAC; 2378 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2379 break; 2380 case RTE_CRYPTO_AUTH_MD5_HMAC: 2381 authdata.algtype = OP_ALG_ALGSEL_MD5; 2382 authdata.algmode = OP_ALG_AAI_HMAC; 2383 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2384 break; 2385 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2386 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2387 authdata.algmode = OP_ALG_AAI_HMAC; 2388 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2389 break; 2390 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2391 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2392 authdata.algmode = OP_ALG_AAI_HMAC; 2393 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2394 break; 2395 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2396 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2397 authdata.algmode = OP_ALG_AAI_HMAC; 2398 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2399 break; 2400 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2401 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2402 authdata.algmode = OP_ALG_AAI_HMAC; 2403 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2404 break; 2405 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2406 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2407 case RTE_CRYPTO_AUTH_NULL: 2408 case RTE_CRYPTO_AUTH_SHA1: 2409 case RTE_CRYPTO_AUTH_SHA256: 2410 case RTE_CRYPTO_AUTH_SHA512: 2411 case RTE_CRYPTO_AUTH_SHA224: 2412 case RTE_CRYPTO_AUTH_SHA384: 2413 case RTE_CRYPTO_AUTH_MD5: 2414 case RTE_CRYPTO_AUTH_AES_GMAC: 2415 case RTE_CRYPTO_AUTH_KASUMI_F9: 2416 case RTE_CRYPTO_AUTH_AES_CMAC: 2417 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2418 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2419 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2420 auth_xform->algo); 2421 ret = -ENOTSUP; 2422 goto error_out; 2423 default: 2424 DPAA2_SEC_ERR("Crypto: Undefined Auth 
specified %u", 2425 auth_xform->algo); 2426 ret = -ENOTSUP; 2427 goto error_out; 2428 } 2429 cipherdata.key = (size_t)session->cipher_key.data; 2430 cipherdata.keylen = session->cipher_key.length; 2431 cipherdata.key_enc_flags = 0; 2432 cipherdata.key_type = RTA_DATA_IMM; 2433 2434 switch (cipher_xform->algo) { 2435 case RTE_CRYPTO_CIPHER_AES_CBC: 2436 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2437 cipherdata.algmode = OP_ALG_AAI_CBC; 2438 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2439 break; 2440 case RTE_CRYPTO_CIPHER_3DES_CBC: 2441 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2442 cipherdata.algmode = OP_ALG_AAI_CBC; 2443 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2444 break; 2445 case RTE_CRYPTO_CIPHER_DES_CBC: 2446 cipherdata.algtype = OP_ALG_ALGSEL_DES; 2447 cipherdata.algmode = OP_ALG_AAI_CBC; 2448 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC; 2449 break; 2450 case RTE_CRYPTO_CIPHER_AES_CTR: 2451 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2452 cipherdata.algmode = OP_ALG_AAI_CTR; 2453 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2454 break; 2455 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2456 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2457 case RTE_CRYPTO_CIPHER_NULL: 2458 case RTE_CRYPTO_CIPHER_3DES_ECB: 2459 case RTE_CRYPTO_CIPHER_3DES_CTR: 2460 case RTE_CRYPTO_CIPHER_AES_ECB: 2461 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2462 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2463 cipher_xform->algo); 2464 ret = -ENOTSUP; 2465 goto error_out; 2466 default: 2467 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2468 cipher_xform->algo); 2469 ret = -ENOTSUP; 2470 goto error_out; 2471 } 2472 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2473 DIR_ENC : DIR_DEC; 2474 2475 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2476 priv->flc_desc[0].desc[1] = authdata.keylen; 2477 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2478 DESC_JOB_IO_LEN, 2479 (unsigned int *)priv->flc_desc[0].desc, 2480 &priv->flc_desc[0].desc[2], 2); 2481 2482 if (err < 0) { 2483 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2484 ret = -EINVAL; 2485 goto error_out; 2486 } 2487 if (priv->flc_desc[0].desc[2] & 1) { 2488 cipherdata.key_type = RTA_DATA_IMM; 2489 } else { 2490 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2491 cipherdata.key_type = RTA_DATA_PTR; 2492 } 2493 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2494 authdata.key_type = RTA_DATA_IMM; 2495 } else { 2496 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2497 authdata.key_type = RTA_DATA_PTR; 2498 } 2499 priv->flc_desc[0].desc[0] = 0; 2500 priv->flc_desc[0].desc[1] = 0; 2501 priv->flc_desc[0].desc[2] = 0; 2502 2503 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2504 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2505 0, SHR_SERIAL, 2506 &cipherdata, &authdata, 2507 session->iv.length, 2508 session->digest_length, 2509 session->dir); 2510 if (bufsize < 0) { 2511 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2512 ret = -EINVAL; 2513 goto error_out; 2514 } 2515 } else { 2516 DPAA2_SEC_ERR("Hash before cipher not supported"); 2517 ret = -ENOTSUP; 2518 goto error_out; 2519 } 2520 2521 flc->word1_sdl = (uint8_t)bufsize; 2522 session->ctxt = priv; 2523 #ifdef CAAM_DESC_DEBUG 2524 int i; 2525 for (i = 0; i < bufsize; i++) 2526 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2527 i, priv->flc_desc[0].desc[i]); 2528 #endif 2529 2530 return ret; 2531 2532 error_out: 2533 rte_free(session->cipher_key.data); 2534 rte_free(session->auth_key.data); 2535 rte_free(priv); 2536 return ret; 2537 } 2538 2539 static int 2540 
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2541 struct rte_crypto_sym_xform *xform, void *sess) 2542 { 2543 dpaa2_sec_session *session = sess; 2544 int ret; 2545 2546 PMD_INIT_FUNC_TRACE(); 2547 2548 if (unlikely(sess == NULL)) { 2549 DPAA2_SEC_ERR("Invalid session struct"); 2550 return -EINVAL; 2551 } 2552 2553 memset(session, 0, sizeof(dpaa2_sec_session)); 2554 /* Default IV length = 0 */ 2555 session->iv.length = 0; 2556 2557 /* Cipher Only */ 2558 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2559 ret = dpaa2_sec_cipher_init(dev, xform, session); 2560 2561 /* Authentication Only */ 2562 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2563 xform->next == NULL) { 2564 ret = dpaa2_sec_auth_init(dev, xform, session); 2565 2566 /* Cipher then Authenticate */ 2567 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2568 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2569 session->ext_params.aead_ctxt.auth_cipher_text = true; 2570 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2571 ret = dpaa2_sec_auth_init(dev, xform, session); 2572 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL) 2573 ret = dpaa2_sec_cipher_init(dev, xform, session); 2574 else 2575 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2576 /* Authenticate then Cipher */ 2577 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2578 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2579 session->ext_params.aead_ctxt.auth_cipher_text = false; 2580 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) 2581 ret = dpaa2_sec_cipher_init(dev, xform, session); 2582 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL) 2583 ret = dpaa2_sec_auth_init(dev, xform, session); 2584 else 2585 ret = dpaa2_sec_aead_chain_init(dev, xform, session); 2586 /* AEAD operation for AES-GCM kind of Algorithms */ 2587 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2588 xform->next == NULL) { 2589 ret = dpaa2_sec_aead_init(dev, xform, session); 2590 2591 } else { 2592 DPAA2_SEC_ERR("Invalid crypto type"); 2593 return -EINVAL; 2594 } 2595 2596 return ret; 2597 } 2598 2599 #ifdef RTE_LIBRTE_SECURITY 2600 static int 2601 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2602 dpaa2_sec_session *session, 2603 struct alginfo *aeaddata) 2604 { 2605 PMD_INIT_FUNC_TRACE(); 2606 2607 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2608 RTE_CACHE_LINE_SIZE); 2609 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2610 DPAA2_SEC_ERR("No Memory for aead key"); 2611 return -ENOMEM; 2612 } 2613 memcpy(session->aead_key.data, aead_xform->key.data, 2614 aead_xform->key.length); 2615 2616 session->digest_length = aead_xform->digest_length; 2617 session->aead_key.length = aead_xform->key.length; 2618 2619 aeaddata->key = (size_t)session->aead_key.data; 2620 aeaddata->keylen = session->aead_key.length; 2621 aeaddata->key_enc_flags = 0; 2622 aeaddata->key_type = RTA_DATA_IMM; 2623 2624 switch (aead_xform->algo) { 2625 case RTE_CRYPTO_AEAD_AES_GCM: 2626 switch (session->digest_length) { 2627 case 8: 2628 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8; 2629 break; 2630 case 12: 2631 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12; 2632 break; 2633 case 16: 2634 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16; 2635 break; 2636 default: 2637 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d", 2638 session->digest_length); 2639 return -EINVAL; 2640 } 2641 aeaddata->algmode = OP_ALG_AAI_GCM; 2642 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2643 break; 2644 case 
RTE_CRYPTO_AEAD_AES_CCM: 2645 switch (session->digest_length) { 2646 case 8: 2647 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8; 2648 break; 2649 case 12: 2650 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12; 2651 break; 2652 case 16: 2653 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16; 2654 break; 2655 default: 2656 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d", 2657 session->digest_length); 2658 return -EINVAL; 2659 } 2660 aeaddata->algmode = OP_ALG_AAI_CCM; 2661 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2662 break; 2663 default: 2664 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2665 aead_xform->algo); 2666 return -ENOTSUP; 2667 } 2668 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2669 DIR_ENC : DIR_DEC; 2670 2671 return 0; 2672 } 2673 2674 static int 2675 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2676 struct rte_crypto_auth_xform *auth_xform, 2677 dpaa2_sec_session *session, 2678 struct alginfo *cipherdata, 2679 struct alginfo *authdata) 2680 { 2681 if (cipher_xform) { 2682 session->cipher_key.data = rte_zmalloc(NULL, 2683 cipher_xform->key.length, 2684 RTE_CACHE_LINE_SIZE); 2685 if (session->cipher_key.data == NULL && 2686 cipher_xform->key.length > 0) { 2687 DPAA2_SEC_ERR("No Memory for cipher key"); 2688 return -ENOMEM; 2689 } 2690 2691 session->cipher_key.length = cipher_xform->key.length; 2692 memcpy(session->cipher_key.data, cipher_xform->key.data, 2693 cipher_xform->key.length); 2694 session->cipher_alg = cipher_xform->algo; 2695 } else { 2696 session->cipher_key.data = NULL; 2697 session->cipher_key.length = 0; 2698 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2699 } 2700 2701 if (auth_xform) { 2702 session->auth_key.data = rte_zmalloc(NULL, 2703 auth_xform->key.length, 2704 RTE_CACHE_LINE_SIZE); 2705 if (session->auth_key.data == NULL && 2706 auth_xform->key.length > 0) { 2707 DPAA2_SEC_ERR("No Memory for auth key"); 2708 return -ENOMEM; 2709 } 2710 session->auth_key.length = auth_xform->key.length; 2711 memcpy(session->auth_key.data, auth_xform->key.data, 2712 auth_xform->key.length); 2713 session->auth_alg = auth_xform->algo; 2714 session->digest_length = auth_xform->digest_length; 2715 } else { 2716 session->auth_key.data = NULL; 2717 session->auth_key.length = 0; 2718 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2719 } 2720 2721 authdata->key = (size_t)session->auth_key.data; 2722 authdata->keylen = session->auth_key.length; 2723 authdata->key_enc_flags = 0; 2724 authdata->key_type = RTA_DATA_IMM; 2725 switch (session->auth_alg) { 2726 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2727 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2728 authdata->algmode = OP_ALG_AAI_HMAC; 2729 break; 2730 case RTE_CRYPTO_AUTH_MD5_HMAC: 2731 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2732 authdata->algmode = OP_ALG_AAI_HMAC; 2733 break; 2734 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2735 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2736 authdata->algmode = OP_ALG_AAI_HMAC; 2737 if (session->digest_length != 16) 2738 DPAA2_SEC_WARN( 2739 "+++Using sha256-hmac truncated len is non-standard," 2740 "it will not work with lookaside proto"); 2741 break; 2742 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2743 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2744 authdata->algmode = OP_ALG_AAI_HMAC; 2745 break; 2746 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2747 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2748 authdata->algmode = OP_ALG_AAI_HMAC; 2749 break; 2750 case RTE_CRYPTO_AUTH_AES_CMAC: 2751 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2752 break; 2753 case 
RTE_CRYPTO_AUTH_NULL: 2754 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2755 break; 2756 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2757 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2758 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2759 case RTE_CRYPTO_AUTH_SHA1: 2760 case RTE_CRYPTO_AUTH_SHA256: 2761 case RTE_CRYPTO_AUTH_SHA512: 2762 case RTE_CRYPTO_AUTH_SHA224: 2763 case RTE_CRYPTO_AUTH_SHA384: 2764 case RTE_CRYPTO_AUTH_MD5: 2765 case RTE_CRYPTO_AUTH_AES_GMAC: 2766 case RTE_CRYPTO_AUTH_KASUMI_F9: 2767 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2768 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2769 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2770 session->auth_alg); 2771 return -ENOTSUP; 2772 default: 2773 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2774 session->auth_alg); 2775 return -ENOTSUP; 2776 } 2777 cipherdata->key = (size_t)session->cipher_key.data; 2778 cipherdata->keylen = session->cipher_key.length; 2779 cipherdata->key_enc_flags = 0; 2780 cipherdata->key_type = RTA_DATA_IMM; 2781 2782 switch (session->cipher_alg) { 2783 case RTE_CRYPTO_CIPHER_AES_CBC: 2784 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2785 cipherdata->algmode = OP_ALG_AAI_CBC; 2786 break; 2787 case RTE_CRYPTO_CIPHER_3DES_CBC: 2788 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2789 cipherdata->algmode = OP_ALG_AAI_CBC; 2790 break; 2791 case RTE_CRYPTO_CIPHER_DES_CBC: 2792 cipherdata->algtype = OP_PCL_IPSEC_DES; 2793 cipherdata->algmode = OP_ALG_AAI_CBC; 2794 break; 2795 case RTE_CRYPTO_CIPHER_AES_CTR: 2796 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2797 cipherdata->algmode = OP_ALG_AAI_CTR; 2798 break; 2799 case RTE_CRYPTO_CIPHER_NULL: 2800 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2801 break; 2802 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2803 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2804 case RTE_CRYPTO_CIPHER_3DES_ECB: 2805 case RTE_CRYPTO_CIPHER_3DES_CTR: 2806 case RTE_CRYPTO_CIPHER_AES_ECB: 2807 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2808 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2809 session->cipher_alg); 2810 return -ENOTSUP; 2811 default: 2812 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2813 session->cipher_alg); 2814 return -ENOTSUP; 2815 } 2816 2817 return 0; 2818 } 2819 2820 #ifdef RTE_LIBRTE_SECURITY_TEST 2821 static uint8_t aes_cbc_iv[] = { 2822 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2823 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2824 #endif 2825 2826 static int 2827 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2828 struct rte_security_session_conf *conf, 2829 void *sess) 2830 { 2831 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2832 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2833 struct rte_crypto_auth_xform *auth_xform = NULL; 2834 struct rte_crypto_aead_xform *aead_xform = NULL; 2835 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2836 struct ctxt_priv *priv; 2837 struct alginfo authdata, cipherdata; 2838 int bufsize; 2839 struct sec_flow_context *flc; 2840 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2841 int ret = -1; 2842 2843 PMD_INIT_FUNC_TRACE(); 2844 2845 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2846 sizeof(struct ctxt_priv) + 2847 sizeof(struct sec_flc_desc), 2848 RTE_CACHE_LINE_SIZE); 2849 2850 if (priv == NULL) { 2851 DPAA2_SEC_ERR("No memory for priv CTXT"); 2852 return -ENOMEM; 2853 } 2854 2855 priv->fle_pool = dev_priv->fle_pool; 2856 flc = &priv->flc_desc[0].flc; 2857 2858 memset(session, 0, sizeof(dpaa2_sec_session)); 2859 2860 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2861 cipher_xform = &conf->crypto_xform->cipher; 2862 
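/* The crypto xform chain may arrive cipher-first or auth-first; both orders
 * are accepted below and folded into one IPsec protocol descriptor. A
 * caller-side sketch for reference; the field values are illustrative only
 * and the rte_security API shape is the one of this DPDK generation:
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *       .ipsec = { .spi = 1,
 *                  .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *                  .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *                  .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL },
 *       .crypto_xform = &cipher_xform,    // cipher -> auth chain
 *   };
 *   sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
 */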
if (conf->crypto_xform->next) 2863 auth_xform = &conf->crypto_xform->next->auth; 2864 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2865 session, &cipherdata, &authdata); 2866 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2867 auth_xform = &conf->crypto_xform->auth; 2868 if (conf->crypto_xform->next) 2869 cipher_xform = &conf->crypto_xform->next->cipher; 2870 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2871 session, &cipherdata, &authdata); 2872 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2873 aead_xform = &conf->crypto_xform->aead; 2874 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2875 session, &cipherdata); 2876 authdata.keylen = 0; 2877 authdata.algtype = 0; 2878 } else { 2879 DPAA2_SEC_ERR("XFORM not specified"); 2880 ret = -EINVAL; 2881 goto out; 2882 } 2883 if (ret) { 2884 DPAA2_SEC_ERR("Failed to process xform"); 2885 goto out; 2886 } 2887 2888 session->ctxt_type = DPAA2_SEC_IPSEC; 2889 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2890 uint8_t *hdr = NULL; 2891 struct ip ip4_hdr; 2892 struct rte_ipv6_hdr ip6_hdr; 2893 struct ipsec_encap_pdb encap_pdb; 2894 2895 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2896 /* For Sec Proto only one descriptor is required. */ 2897 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2898 2899 /* copy algo specific data to PDB */ 2900 switch (cipherdata.algtype) { 2901 case OP_PCL_IPSEC_AES_CTR: 2902 encap_pdb.ctr.ctr_initial = 0x00000001; 2903 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2904 break; 2905 case OP_PCL_IPSEC_AES_GCM8: 2906 case OP_PCL_IPSEC_AES_GCM12: 2907 case OP_PCL_IPSEC_AES_GCM16: 2908 memcpy(encap_pdb.gcm.salt, 2909 (uint8_t *)&(ipsec_xform->salt), 4); 2910 break; 2911 } 2912 2913 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2914 PDBOPTS_ESP_OIHI_PDB_INL | 2915 PDBOPTS_ESP_IVSRC | 2916 PDBHMO_ESP_ENCAP_DTTL | 2917 PDBHMO_ESP_SNR; 2918 if (ipsec_xform->options.esn) 2919 encap_pdb.options |= PDBOPTS_ESP_ESN; 2920 encap_pdb.spi = ipsec_xform->spi; 2921 session->dir = DIR_ENC; 2922 if (ipsec_xform->tunnel.type == 2923 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 2924 encap_pdb.ip_hdr_len = sizeof(struct ip); 2925 ip4_hdr.ip_v = IPVERSION; 2926 ip4_hdr.ip_hl = 5; 2927 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2928 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2929 ip4_hdr.ip_id = 0; 2930 ip4_hdr.ip_off = 0; 2931 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2932 ip4_hdr.ip_p = IPPROTO_ESP; 2933 ip4_hdr.ip_sum = 0; 2934 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2935 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2936 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *) 2937 &ip4_hdr, sizeof(struct ip)); 2938 hdr = (uint8_t *)&ip4_hdr; 2939 } else if (ipsec_xform->tunnel.type == 2940 RTE_SECURITY_IPSEC_TUNNEL_IPV6) { 2941 ip6_hdr.vtc_flow = rte_cpu_to_be_32( 2942 DPAA2_IPv6_DEFAULT_VTC_FLOW | 2943 ((ipsec_xform->tunnel.ipv6.dscp << 2944 RTE_IPV6_HDR_TC_SHIFT) & 2945 RTE_IPV6_HDR_TC_MASK) | 2946 ((ipsec_xform->tunnel.ipv6.flabel << 2947 RTE_IPV6_HDR_FL_SHIFT) & 2948 RTE_IPV6_HDR_FL_MASK)); 2949 /* Payload length will be updated by HW */ 2950 ip6_hdr.payload_len = 0; 2951 ip6_hdr.hop_limits = 2952 ipsec_xform->tunnel.ipv6.hlimit; 2953 ip6_hdr.proto = (ipsec_xform->proto == 2954 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
2955 IPPROTO_ESP : IPPROTO_AH; 2956 memcpy(&ip6_hdr.src_addr, 2957 &ipsec_xform->tunnel.ipv6.src_addr, 16); 2958 memcpy(&ip6_hdr.dst_addr, 2959 &ipsec_xform->tunnel.ipv6.dst_addr, 16); 2960 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); 2961 hdr = (uint8_t *)&ip6_hdr; 2962 } 2963 2964 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2965 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 2966 SHR_WAIT : SHR_SERIAL, &encap_pdb, 2967 hdr, &cipherdata, &authdata); 2968 } else if (ipsec_xform->direction == 2969 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2970 struct ipsec_decap_pdb decap_pdb; 2971 2972 flc->dhr = SEC_FLC_DHR_INBOUND; 2973 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2974 /* copy algo specific data to PDB */ 2975 switch (cipherdata.algtype) { 2976 case OP_PCL_IPSEC_AES_CTR: 2977 decap_pdb.ctr.ctr_initial = 0x00000001; 2978 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt; 2979 break; 2980 case OP_PCL_IPSEC_AES_GCM8: 2981 case OP_PCL_IPSEC_AES_GCM12: 2982 case OP_PCL_IPSEC_AES_GCM16: 2983 memcpy(decap_pdb.gcm.salt, 2984 (uint8_t *)&(ipsec_xform->salt), 4); 2985 break; 2986 } 2987 2988 decap_pdb.options = (ipsec_xform->tunnel.type == 2989 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ? 2990 sizeof(struct ip) << 16 : 2991 sizeof(struct rte_ipv6_hdr) << 16; 2992 if (ipsec_xform->options.esn) 2993 decap_pdb.options |= PDBOPTS_ESP_ESN; 2994 2995 if (ipsec_xform->replay_win_sz) { 2996 uint32_t win_sz; 2997 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); 2998 2999 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) { 3000 DPAA2_SEC_INFO("Max Anti replay Win sz = 128"); 3001 win_sz = 128; 3002 } 3003 switch (win_sz) { 3004 case 1: 3005 case 2: 3006 case 4: 3007 case 8: 3008 case 16: 3009 case 32: 3010 decap_pdb.options |= PDBOPTS_ESP_ARS32; 3011 break; 3012 case 64: 3013 decap_pdb.options |= PDBOPTS_ESP_ARS64; 3014 break; 3015 case 256: 3016 decap_pdb.options |= PDBOPTS_ESP_ARS256; 3017 break; 3018 case 512: 3019 decap_pdb.options |= PDBOPTS_ESP_ARS512; 3020 break; 3021 case 1024: 3022 decap_pdb.options |= PDBOPTS_ESP_ARS1024; 3023 break; 3024 case 128: 3025 default: 3026 decap_pdb.options |= PDBOPTS_ESP_ARS128; 3027 } 3028 } 3029 session->dir = DIR_DEC; 3030 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 3031 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ? 3032 SHR_WAIT : SHR_SERIAL, 3033 &decap_pdb, &cipherdata, &authdata); 3034 } else 3035 goto out; 3036 3037 if (bufsize < 0) { 3038 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 3039 goto out; 3040 } 3041 3042 flc->word1_sdl = (uint8_t)bufsize; 3043 3044 /* Enable the stashing control bit */ 3045 DPAA2_SET_FLC_RSC(flc); 3046 flc->word2_rflc_31_0 = lower_32_bits( 3047 (size_t)&(((struct dpaa2_sec_qp *) 3048 dev->data->queue_pairs[0])->rx_vq) | 0x14); 3049 flc->word3_rflc_63_32 = upper_32_bits( 3050 (size_t)&(((struct dpaa2_sec_qp *) 3051 dev->data->queue_pairs[0])->rx_vq)); 3052 3053 /* Set EWS bit i.e. 
enable write-safe */ 3054 DPAA2_SET_FLC_EWS(flc); 3055 /* Set BS = 1 i.e reuse input buffers as output buffers */ 3056 DPAA2_SET_FLC_REUSE_BS(flc); 3057 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 3058 DPAA2_SET_FLC_REUSE_FF(flc); 3059 3060 session->ctxt = priv; 3061 3062 return 0; 3063 out: 3064 rte_free(session->auth_key.data); 3065 rte_free(session->cipher_key.data); 3066 rte_free(priv); 3067 return ret; 3068 } 3069 3070 static int 3071 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 3072 struct rte_security_session_conf *conf, 3073 void *sess) 3074 { 3075 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 3076 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 3077 struct rte_crypto_auth_xform *auth_xform = NULL; 3078 struct rte_crypto_cipher_xform *cipher_xform; 3079 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 3080 struct ctxt_priv *priv; 3081 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 3082 struct alginfo authdata, cipherdata; 3083 struct alginfo *p_authdata = NULL; 3084 int bufsize = -1; 3085 struct sec_flow_context *flc; 3086 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 3087 int swap = true; 3088 #else 3089 int swap = false; 3090 #endif 3091 3092 PMD_INIT_FUNC_TRACE(); 3093 3094 memset(session, 0, sizeof(dpaa2_sec_session)); 3095 3096 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 3097 sizeof(struct ctxt_priv) + 3098 sizeof(struct sec_flc_desc), 3099 RTE_CACHE_LINE_SIZE); 3100 3101 if (priv == NULL) { 3102 DPAA2_SEC_ERR("No memory for priv CTXT"); 3103 return -ENOMEM; 3104 } 3105 3106 priv->fle_pool = dev_priv->fle_pool; 3107 flc = &priv->flc_desc[0].flc; 3108 3109 /* find xfrm types */ 3110 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 3111 cipher_xform = &xform->cipher; 3112 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 3113 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 3114 session->ext_params.aead_ctxt.auth_cipher_text = true; 3115 cipher_xform = &xform->cipher; 3116 auth_xform = &xform->next->auth; 3117 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 3118 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 3119 session->ext_params.aead_ctxt.auth_cipher_text = false; 3120 cipher_xform = &xform->next->cipher; 3121 auth_xform = &xform->auth; 3122 } else { 3123 DPAA2_SEC_ERR("Invalid crypto type"); 3124 return -EINVAL; 3125 } 3126 3127 session->ctxt_type = DPAA2_SEC_PDCP; 3128 if (cipher_xform) { 3129 session->cipher_key.data = rte_zmalloc(NULL, 3130 cipher_xform->key.length, 3131 RTE_CACHE_LINE_SIZE); 3132 if (session->cipher_key.data == NULL && 3133 cipher_xform->key.length > 0) { 3134 DPAA2_SEC_ERR("No Memory for cipher key"); 3135 rte_free(priv); 3136 return -ENOMEM; 3137 } 3138 session->cipher_key.length = cipher_xform->key.length; 3139 memcpy(session->cipher_key.data, cipher_xform->key.data, 3140 cipher_xform->key.length); 3141 session->dir = 3142 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
3143 DIR_ENC : DIR_DEC;
3144 session->cipher_alg = cipher_xform->algo;
3145 } else {
3146 session->cipher_key.data = NULL;
3147 session->cipher_key.length = 0;
3148 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3149 session->dir = DIR_ENC;
3150 }
3151
3152 session->pdcp.domain = pdcp_xform->domain;
3153 session->pdcp.bearer = pdcp_xform->bearer;
3154 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3155 session->pdcp.sn_size = pdcp_xform->sn_size;
3156 session->pdcp.hfn = pdcp_xform->hfn;
3157 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3158 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3159 /* The HFN override offset is carried in the iv.offset field */
3160 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3161
3162 cipherdata.key = (size_t)session->cipher_key.data;
3163 cipherdata.keylen = session->cipher_key.length;
3164 cipherdata.key_enc_flags = 0;
3165 cipherdata.key_type = RTA_DATA_IMM;
3166
3167 switch (session->cipher_alg) {
3168 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3169 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3170 break;
3171 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3172 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3173 break;
3174 case RTE_CRYPTO_CIPHER_AES_CTR:
3175 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3176 break;
3177 case RTE_CRYPTO_CIPHER_NULL:
3178 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3179 break;
3180 default:
3181 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3182 session->cipher_alg);
3183 goto out;
3184 }
3185
3186 if (auth_xform) {
3187 session->auth_key.data = rte_zmalloc(NULL,
3188 auth_xform->key.length,
3189 RTE_CACHE_LINE_SIZE);
3190 if (!session->auth_key.data &&
3191 auth_xform->key.length > 0) {
3192 DPAA2_SEC_ERR("No Memory for auth key");
3193 rte_free(session->cipher_key.data);
3194 rte_free(priv);
3195 return -ENOMEM;
3196 }
3197 session->auth_key.length = auth_xform->key.length;
3198 memcpy(session->auth_key.data, auth_xform->key.data,
3199 auth_xform->key.length);
3200 session->auth_alg = auth_xform->algo;
3201 } else {
3202 session->auth_key.data = NULL;
3203 session->auth_key.length = 0;
3204 session->auth_alg = 0;
3205 }
3206 authdata.key = (size_t)session->auth_key.data;
3207 authdata.keylen = session->auth_key.length;
3208 authdata.key_enc_flags = 0;
3209 authdata.key_type = RTA_DATA_IMM;
3210
3211 if (session->auth_alg) {
3212 switch (session->auth_alg) {
3213 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3214 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3215 break;
3216 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3217 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3218 break;
3219 case RTE_CRYPTO_AUTH_AES_CMAC:
3220 authdata.algtype = PDCP_AUTH_TYPE_AES;
3221 break;
3222 case RTE_CRYPTO_AUTH_NULL:
3223 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3224 break;
3225 default:
3226 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3227 session->auth_alg);
3228 goto out;
3229 }
3230
3231 p_authdata = &authdata;
3232 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3233 DPAA2_SEC_ERR("Crypto: Integrity protection is mandatory for c-plane");
3234 goto out;
3235 }
3236
3237 if (rta_inline_pdcp_query(authdata.algtype,
3238 cipherdata.algtype,
3239 session->pdcp.sn_size,
3240 session->pdcp.hfn_ovd)) {
3241 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3242 cipherdata.key_type = RTA_DATA_PTR;
3243 }
3244
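/* Control-plane and user-plane PDCP use different RTA helpers: c-plane
 * descriptors always carry both cipher and integrity algorithms, while
 * u-plane descriptors take p_authdata, which stays NULL when no integrity
 * xform was supplied. HFN, bearer, packet direction and the HFN threshold
 * are baked into the descriptor here; per-packet HFN override (hfn_ovd) is
 * signalled at enqueue time instead, using the offset captured above from
 * cipher_xform->iv.offset.
 */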
3245 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3246 if (session->dir == DIR_ENC)
3247 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3248 priv->flc_desc[0].desc, 1, swap,
3249 pdcp_xform->hfn,
3250 session->pdcp.sn_size,
3251 pdcp_xform->bearer,
3252 pdcp_xform->pkt_dir,
3253 pdcp_xform->hfn_threshold,
3254 &cipherdata, &authdata,
3255 0);
3256 else if (session->dir == DIR_DEC)
3257 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3258 priv->flc_desc[0].desc, 1, swap,
3259 pdcp_xform->hfn,
3260 session->pdcp.sn_size,
3261 pdcp_xform->bearer,
3262 pdcp_xform->pkt_dir,
3263 pdcp_xform->hfn_threshold,
3264 &cipherdata, &authdata,
3265 0);
3266 } else {
3267 if (session->dir == DIR_ENC)
3268 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3269 priv->flc_desc[0].desc, 1, swap,
3270 session->pdcp.sn_size,
3271 pdcp_xform->hfn,
3272 pdcp_xform->bearer,
3273 pdcp_xform->pkt_dir,
3274 pdcp_xform->hfn_threshold,
3275 &cipherdata, p_authdata, 0);
3276 else if (session->dir == DIR_DEC)
3277 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3278 priv->flc_desc[0].desc, 1, swap,
3279 session->pdcp.sn_size,
3280 pdcp_xform->hfn,
3281 pdcp_xform->bearer,
3282 pdcp_xform->pkt_dir,
3283 pdcp_xform->hfn_threshold,
3284 &cipherdata, p_authdata, 0);
3285 }
3286
3287 if (bufsize < 0) {
3288 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3289 goto out;
3290 }
3291
3292 /* Enable the stashing control bit */
3293 DPAA2_SET_FLC_RSC(flc);
3294 flc->word2_rflc_31_0 = lower_32_bits(
3295 (size_t)&(((struct dpaa2_sec_qp *)
3296 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3297 flc->word3_rflc_63_32 = upper_32_bits(
3298 (size_t)&(((struct dpaa2_sec_qp *)
3299 dev->data->queue_pairs[0])->rx_vq));
3300
3301 flc->word1_sdl = (uint8_t)bufsize;
3302
3303 /* TODO - check the perf impact or
3304 * align as per descriptor type
3305 * Set EWS bit i.e. enable write-safe
3306 * DPAA2_SET_FLC_EWS(flc);
3307 */
3308
3309 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3310 DPAA2_SET_FLC_REUSE_BS(flc);
3311 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3312 DPAA2_SET_FLC_REUSE_FF(flc);
3313
3314 session->ctxt = priv;
3315
3316 return 0;
3317 out:
3318 rte_free(session->auth_key.data);
3319 rte_free(session->cipher_key.data);
3320 rte_free(priv);
3321 return -EINVAL;
3322 }
3323
3324 static int
3325 dpaa2_sec_security_session_create(void *dev,
3326 struct rte_security_session_conf *conf,
3327 struct rte_security_session *sess,
3328 struct rte_mempool *mempool)
3329 {
3330 void *sess_private_data;
3331 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3332 int ret;
3333
3334 if (rte_mempool_get(mempool, &sess_private_data)) {
3335 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3336 return -ENOMEM;
3337 }
3338
3339 switch (conf->protocol) {
3340 case RTE_SECURITY_PROTOCOL_IPSEC:
3341 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3342 sess_private_data);
3343 break;
3344 case RTE_SECURITY_PROTOCOL_MACSEC:
3345 rte_mempool_put(mempool, sess_private_data); return -ENOTSUP; /* don't leak the object */
3346 case RTE_SECURITY_PROTOCOL_PDCP:
3347 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3348 sess_private_data);
3349 break;
3350 default:
3351 rte_mempool_put(mempool, sess_private_data); return -EINVAL;
3352 }
3353 if (ret != 0) {
3354 DPAA2_SEC_ERR("Failed to configure session parameters");
3355 /* Return session to mempool */
3356 rte_mempool_put(mempool, sess_private_data);
3357 return ret;
3358 }
3359
3360 set_sec_session_private_data(sess, sess_private_data);
3361
3362 return ret;
3363 }
3364
3365 /** Clear the memory of session so it doesn't leave key material behind */
3366 static int
3367 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3368 struct rte_security_session *sess)
3369 {
3370 PMD_INIT_FUNC_TRACE();
3371 void *sess_priv = get_sec_session_private_data(sess);
3372
3373 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3374
3375 if
(sess_priv) { 3376 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3377 3378 rte_free(s->ctxt); 3379 rte_free(s->cipher_key.data); 3380 rte_free(s->auth_key.data); 3381 memset(s, 0, sizeof(dpaa2_sec_session)); 3382 set_sec_session_private_data(sess, NULL); 3383 rte_mempool_put(sess_mp, sess_priv); 3384 } 3385 return 0; 3386 } 3387 #endif 3388 static int 3389 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 3390 struct rte_crypto_sym_xform *xform, 3391 struct rte_cryptodev_sym_session *sess, 3392 struct rte_mempool *mempool) 3393 { 3394 void *sess_private_data; 3395 int ret; 3396 3397 if (rte_mempool_get(mempool, &sess_private_data)) { 3398 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 3399 return -ENOMEM; 3400 } 3401 3402 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 3403 if (ret != 0) { 3404 DPAA2_SEC_ERR("Failed to configure session parameters"); 3405 /* Return session to mempool */ 3406 rte_mempool_put(mempool, sess_private_data); 3407 return ret; 3408 } 3409 3410 set_sym_session_private_data(sess, dev->driver_id, 3411 sess_private_data); 3412 3413 return 0; 3414 } 3415 3416 /** Clear the memory of session so it doesn't leave key material behind */ 3417 static void 3418 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 3419 struct rte_cryptodev_sym_session *sess) 3420 { 3421 PMD_INIT_FUNC_TRACE(); 3422 uint8_t index = dev->driver_id; 3423 void *sess_priv = get_sym_session_private_data(sess, index); 3424 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 3425 3426 if (sess_priv) { 3427 rte_free(s->ctxt); 3428 rte_free(s->cipher_key.data); 3429 rte_free(s->auth_key.data); 3430 memset(s, 0, sizeof(dpaa2_sec_session)); 3431 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 3432 set_sym_session_private_data(sess, index, NULL); 3433 rte_mempool_put(sess_mp, sess_priv); 3434 } 3435 } 3436 3437 static int 3438 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 3439 struct rte_cryptodev_config *config __rte_unused) 3440 { 3441 PMD_INIT_FUNC_TRACE(); 3442 3443 return 0; 3444 } 3445 3446 static int 3447 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 3448 { 3449 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3450 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3451 struct dpseci_attr attr; 3452 struct dpaa2_queue *dpaa2_q; 3453 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3454 dev->data->queue_pairs; 3455 struct dpseci_rx_queue_attr rx_attr; 3456 struct dpseci_tx_queue_attr tx_attr; 3457 int ret, i; 3458 3459 PMD_INIT_FUNC_TRACE(); 3460 3461 memset(&attr, 0, sizeof(struct dpseci_attr)); 3462 3463 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 3464 if (ret) { 3465 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 3466 priv->hw_id); 3467 goto get_attr_failure; 3468 } 3469 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 3470 if (ret) { 3471 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 3472 goto get_attr_failure; 3473 } 3474 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 3475 dpaa2_q = &qp[i]->rx_vq; 3476 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3477 &rx_attr); 3478 dpaa2_q->fqid = rx_attr.fqid; 3479 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 3480 } 3481 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 3482 dpaa2_q = &qp[i]->tx_vq; 3483 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 3484 &tx_attr); 3485 dpaa2_q->fqid = tx_attr.fqid; 3486 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 3487 
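/* The two loops above cache the frame queue IDs (FQIDs) reported by MC into
 * each queue pair's rx_vq/tx_vq, so the datapath can enqueue and dequeue
 * directly through QBMAN without further MC calls once the device is
 * started.
 */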
}
3488
3489 return 0;
3490 get_attr_failure:
3491 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3492 return -1;
3493 }
3494
3495 static void
3496 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3497 {
3498 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3499 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3500 int ret;
3501
3502 PMD_INIT_FUNC_TRACE();
3503
3504 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3505 if (ret) {
3506 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3507 priv->hw_id);
3508 return;
3509 }
3510
3511 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3512 if (ret < 0) {
3513 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %0x", ret);
3514 return;
3515 }
3516 }
3517
3518 static int
3519 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3520 {
3521 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3522 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3523 int ret;
3524
3525 PMD_INIT_FUNC_TRACE();
3526
3527 /* Function is the reverse of dpaa2_sec_dev_init.
3528 * It does the following:
3529 * 1. Detach the DPSECI from attached resources (buffer pools, dpbp_id)
3530 * 2. Close the DPSECI device
3531 * 3. Free the allocated resources.
3532 */
3533
3534 /* Close the device at the underlying layer */
3535 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3536 if (ret) {
3537 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3538 return -1;
3539 }
3540
3541 /* Free the memory allocated for the dpseci MC I/O object */
3542 priv->hw = NULL;
3543 rte_free(dpseci);
3544
3545 return 0;
3546 }
3547
3548 static void
3549 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3550 struct rte_cryptodev_info *info)
3551 {
3552 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3553
3554 PMD_INIT_FUNC_TRACE();
3555 if (info != NULL) {
3556 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3557 info->feature_flags = dev->feature_flags;
3558 info->capabilities = dpaa2_sec_capabilities;
3559 /* No limit on the number of sessions */
3560 info->sym.max_nb_sessions = 0;
3561 info->driver_id = cryptodev_driver_id;
3562 }
3563 }
3564
3565 static
3566 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3567 struct rte_cryptodev_stats *stats)
3568 {
3569 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3570 struct fsl_mc_io dpseci;
3571 struct dpseci_sec_counters counters = {0};
3572 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3573 dev->data->queue_pairs;
3574 int ret, i;
3575
3576 PMD_INIT_FUNC_TRACE();
3577 if (stats == NULL) {
3578 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3579 return;
3580 }
3581 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3582 if (qp == NULL || qp[i] == NULL) {
3583 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3584 continue;
3585 }
3586
3587 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3588 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3589 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3590 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3591 }
3592
3593 /* If a secondary process queries the stats, the MCP portal in priv->hw
3594 * may hold the primary process address. Use this process's own MCP
3595 * portal address for this object.
3596 */ 3597 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); 3598 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token, 3599 &counters); 3600 if (ret) { 3601 DPAA2_SEC_ERR("SEC counters failed"); 3602 } else { 3603 DPAA2_SEC_INFO("dpseci hardware stats:" 3604 "\n\tNum of Requests Dequeued = %" PRIu64 3605 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3606 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3607 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3608 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3609 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3610 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3611 counters.dequeued_requests, 3612 counters.ob_enc_requests, 3613 counters.ib_dec_requests, 3614 counters.ob_enc_bytes, 3615 counters.ob_prot_bytes, 3616 counters.ib_dec_bytes, 3617 counters.ib_valid_bytes); 3618 } 3619 } 3620 3621 static 3622 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3623 { 3624 int i; 3625 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3626 (dev->data->queue_pairs); 3627 3628 PMD_INIT_FUNC_TRACE(); 3629 3630 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3631 if (qp[i] == NULL) { 3632 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3633 continue; 3634 } 3635 qp[i]->tx_vq.rx_pkts = 0; 3636 qp[i]->tx_vq.tx_pkts = 0; 3637 qp[i]->tx_vq.err_pkts = 0; 3638 qp[i]->rx_vq.rx_pkts = 0; 3639 qp[i]->rx_vq.tx_pkts = 0; 3640 qp[i]->rx_vq.err_pkts = 0; 3641 } 3642 } 3643 3644 static void __rte_hot 3645 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3646 const struct qbman_fd *fd, 3647 const struct qbman_result *dq, 3648 struct dpaa2_queue *rxq, 3649 struct rte_event *ev) 3650 { 3651 /* Prefetching mbuf */ 3652 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3653 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3654 3655 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3656 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3657 3658 ev->flow_id = rxq->ev.flow_id; 3659 ev->sub_event_type = rxq->ev.sub_event_type; 3660 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3661 ev->op = RTE_EVENT_OP_NEW; 3662 ev->sched_type = rxq->ev.sched_type; 3663 ev->queue_id = rxq->ev.queue_id; 3664 ev->priority = rxq->ev.priority; 3665 ev->event_ptr = sec_fd_to_mbuf(fd); 3666 3667 qbman_swp_dqrr_consume(swp, dq); 3668 } 3669 static void 3670 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused, 3671 const struct qbman_fd *fd, 3672 const struct qbman_result *dq, 3673 struct dpaa2_queue *rxq, 3674 struct rte_event *ev) 3675 { 3676 uint8_t dqrr_index; 3677 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3678 /* Prefetching mbuf */ 3679 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3680 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3681 3682 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3683 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3684 3685 ev->flow_id = rxq->ev.flow_id; 3686 ev->sub_event_type = rxq->ev.sub_event_type; 3687 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3688 ev->op = RTE_EVENT_OP_NEW; 3689 ev->sched_type = rxq->ev.sched_type; 3690 ev->queue_id = rxq->ev.queue_id; 3691 ev->priority = rxq->ev.priority; 3692 3693 ev->event_ptr = sec_fd_to_mbuf(fd); 3694 dqrr_index = qbman_get_dqrr_idx(dq); 3695 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3696 DPAA2_PER_LCORE_DQRR_SIZE++; 3697 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3698 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3699 } 3700 3701 int 3702 
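/* dpaa2_sec_eventq_attach(): bind a SEC RX queue to a DPCON so completions
 * are delivered through the eventdev instead of the polled dequeue path.
 * The rx_vq callback is chosen per schedule type (parallel vs. atomic;
 * atomic additionally enables order preservation and tracks held DQRR
 * entries per lcore), and the event priority is scaled onto the DPCON's
 * priority range.
 */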
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event *event)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct dpseci_rx_queue_cfg cfg;
	uint8_t priority;
	int ret;

	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
	else
		return -EINVAL;

	/* Scale the eventdev priority (0..RTE_EVENT_DEV_PRIORITY_LOWEST)
	 * to the DPCON priority range. Note this assumes a non-zero event
	 * priority (RTE_EVENT_DEV_PRIORITY_HIGHEST is 0 and would divide
	 * by zero here).
	 */
	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
		   (dpcon->num_priorities - 1);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
	cfg.dest_cfg.priority = priority;

	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(qp);
	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
		cfg.order_preservation_en = 1;
	}
	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret) {
		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));

	return 0;
}

int
dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
		int qp_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int ret;

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;

	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret)
		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.sym_session_get_size = dpaa2_sec_sym_session_get_size,
	.sym_session_configure = dpaa2_sec_sym_session_configure,
	.sym_session_clear    = dpaa2_sec_sym_session_clear,
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
#endif
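/*
 * Example (hypothetical, illustrative only): the attach/detach helpers
 * above are intended to be driven by an eventdev-aware caller such as
 * the DPAA2 event device's crypto adapter. A sketch of binding queue
 * pair 0 of 'cdev' to a DPCON channel, assuming 'dpcon' was obtained
 * from the DPAA2 event device, could be:
 *
 *	struct rte_event conf_ev = {
 *		.queue_id = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *
 *	ret = dpaa2_sec_eventq_attach(cdev, 0, dpcon, &conf_ev);
 *	if (ret)
 *		return ret; // fall back to polled dequeue
 *	...
 *	dpaa2_sec_eventq_detach(cdev, 0);
 */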
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[30];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for the primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -ENOMEM;
	}
	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributes: Error = %x",
			retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	/* Name the per-device FLE pool with pid and dev_id so that multiple
	 * processes and devices do not collide.
	 */
	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
		 getpid(), cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* Release what was acquired above; no queue pairs or sessions
	 * exist yet at this point.
	 */
#ifdef RTE_LIBRTE_SECURITY
	rte_free(cryptodev->security_ctx);
	cryptodev->security_ctx = NULL;
#endif
	rte_free(dpseci);
	internals->hw = NULL;
	return -EFAULT;
}
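/*
 * Example (hypothetical application-side code): the feature flags set in
 * dpaa2_sec_dev_init() above are what applications discover through the
 * cryptodev info API. A caller that wants the rte_security lookaside
 * protocol path could gate on RTE_CRYPTODEV_FF_SECURITY, e.g.:
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SECURITY) {
 *		struct rte_security_ctx *sec_ctx =
 *			rte_cryptodev_get_sec_ctx(dev_id);
 *		// create rte_security sessions against sec_ctx
 *	}
 */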
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		/* Fail the probe gracefully instead of panicking; only
		 * this device is affected by the allocation failure.
		 */
		if (cryptodev->data->dev_private == NULL) {
			DPAA2_SEC_ERR("Cannot allocate memory for private device data");
			rte_cryptodev_pmd_release_device(cryptodev);
			return -ENOMEM;
		}
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	if (dpaa2_svr_family == SVR_LX2160A)
		rta_set_sec_era(RTA_SEC_ERA_10);
	else
		rta_set_sec_era(RTA_SEC_ERA_8);

	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
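/*
 * Example (illustrative only): the log type registered above can be
 * raised at runtime to debug probe or datapath issues, either from the
 * application:
 *
 *	rte_log_set_level_pattern("pmd.crypto.dpaa2", RTE_LOG_DEBUG);
 *
 * or from the EAL command line with --log-level=pmd.crypto.dpaa2:debug.
 */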