1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2018 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 #include <unistd.h> 11 12 #include <rte_mbuf.h> 13 #include <rte_cryptodev.h> 14 #include <rte_malloc.h> 15 #include <rte_memcpy.h> 16 #include <rte_string_fns.h> 17 #include <rte_cycles.h> 18 #include <rte_kvargs.h> 19 #include <rte_dev.h> 20 #include <rte_cryptodev_pmd.h> 21 #include <rte_common.h> 22 #include <rte_fslmc.h> 23 #include <fslmc_vfio.h> 24 #include <dpaa2_hw_pvt.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <dpaa2_hw_mempool.h> 27 #include <fsl_dpopr.h> 28 #include <fsl_dpseci.h> 29 #include <fsl_mc_sys.h> 30 31 #include "dpaa2_sec_priv.h" 32 #include "dpaa2_sec_event.h" 33 #include "dpaa2_sec_logs.h" 34 35 /* Required types */ 36 typedef uint64_t dma_addr_t; 37 38 /* RTA header files */ 39 #include <hw/desc/ipsec.h> 40 #include <hw/desc/pdcp.h> 41 #include <hw/desc/algo.h> 42 43 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 44 * a pointer to the shared descriptor 45 */ 46 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 47 #define FSL_VENDOR_ID 0x1957 48 #define FSL_DEVICE_ID 0x410 49 #define FSL_SUBSYSTEM_SEC 1 50 #define FSL_MC_DPSECI_DEVID 3 51 52 #define NO_PREFETCH 0 53 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 54 #define FLE_POOL_NUM_BUFS 32000 55 #define FLE_POOL_BUF_SIZE 256 56 #define FLE_POOL_CACHE_SIZE 512 57 #define FLE_SG_MEM_SIZE 2048 58 #define SEC_FLC_DHR_OUTBOUND -114 59 #define SEC_FLC_DHR_INBOUND 0 60 61 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; 62 63 static uint8_t cryptodev_driver_id; 64 65 int dpaa2_logtype_sec; 66 67 static inline int 68 build_proto_compound_fd(dpaa2_sec_session *sess, 69 struct rte_crypto_op *op, 70 struct qbman_fd *fd, uint16_t bpid) 71 { 72 struct rte_crypto_sym_op *sym_op = op->sym; 73 struct ctxt_priv *priv = sess->ctxt; 74 struct qbman_fle *fle, *ip_fle, *op_fle; 75 struct sec_flow_context *flc; 76 struct rte_mbuf *src_mbuf = sym_op->m_src; 77 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 78 int retval; 79 80 if (!dst_mbuf) 81 dst_mbuf = src_mbuf; 82 83 /* Save the shared descriptor */ 84 flc = &priv->flc_desc[0].flc; 85 86 /* we are using the first FLE entry to store Mbuf */ 87 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 88 if (retval) { 89 DPAA2_SEC_ERR("Memory alloc failed"); 90 return -1; 91 } 92 memset(fle, 0, FLE_POOL_BUF_SIZE); 93 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 94 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 95 96 op_fle = fle + 1; 97 ip_fle = fle + 2; 98 99 if (likely(bpid < MAX_BPID)) { 100 DPAA2_SET_FD_BPID(fd, bpid); 101 DPAA2_SET_FLE_BPID(op_fle, bpid); 102 DPAA2_SET_FLE_BPID(ip_fle, bpid); 103 } else { 104 DPAA2_SET_FD_IVP(fd); 105 DPAA2_SET_FLE_IVP(op_fle); 106 DPAA2_SET_FLE_IVP(ip_fle); 107 } 108 109 /* Configure FD as a FRAME LIST */ 110 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 111 DPAA2_SET_FD_COMPOUND_FMT(fd); 112 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 113 114 /* Configure Output FLE with dst mbuf data */ 115 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 116 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 117 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 118 119 /* Configure Input FLE with src mbuf data */ 120 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 121 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 122 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 123 124 DPAA2_SET_FD_LEN(fd, ip_fle->length); 125 DPAA2_SET_FLE_FIN(ip_fle); 126 127 #ifdef ENABLE_HFN_OVERRIDE 128 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 129 /*enable HFN override override */ 130 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd); 131 DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd); 132 DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd); 133 } 134 #endif 135 136 return 0; 137 138 } 139 140 static inline int 141 build_proto_fd(dpaa2_sec_session *sess, 142 struct rte_crypto_op *op, 143 struct qbman_fd *fd, uint16_t bpid) 144 { 145 struct rte_crypto_sym_op *sym_op = op->sym; 146 if (sym_op->m_dst) 147 return build_proto_compound_fd(sess, op, fd, bpid); 148 149 struct ctxt_priv *priv = sess->ctxt; 150 struct sec_flow_context *flc; 151 struct rte_mbuf *mbuf = sym_op->m_src; 152 153 if (likely(bpid < MAX_BPID)) 154 DPAA2_SET_FD_BPID(fd, bpid); 155 else 156 DPAA2_SET_FD_IVP(fd); 157 158 /* Save the shared descriptor */ 159 flc = &priv->flc_desc[0].flc; 160 161 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 162 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 163 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 164 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 165 166 /* save physical address of mbuf */ 167 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 168 mbuf->buf_iova = (size_t)op; 169 170 return 0; 171 } 172 173 static inline int 174 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 175 struct rte_crypto_op *op, 176 struct qbman_fd *fd, __rte_unused uint16_t bpid) 177 { 178 struct rte_crypto_sym_op *sym_op = op->sym; 179 struct ctxt_priv *priv = sess->ctxt; 180 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 181 struct sec_flow_context *flc; 182 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 183 int icv_len = sess->digest_length; 184 uint8_t *old_icv; 185 struct rte_mbuf *mbuf; 186 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 187 sess->iv.offset); 188 189 PMD_INIT_FUNC_TRACE(); 190 191 if (sym_op->m_dst) 192 mbuf = sym_op->m_dst; 193 else 194 mbuf = sym_op->m_src; 195 196 /* first FLE entry used to store mbuf and session ctxt */ 197 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 198 RTE_CACHE_LINE_SIZE); 199 if (unlikely(!fle)) { 200 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 201 return -1; 202 } 203 memset(fle, 0, FLE_SG_MEM_SIZE); 204 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 205 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 206 207 op_fle = fle + 1; 208 ip_fle = fle + 2; 209 sge = fle + 3; 210 211 /* Save the shared descriptor */ 212 flc = &priv->flc_desc[0].flc; 213 214 /* Configure FD as a FRAME LIST */ 215 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 216 DPAA2_SET_FD_COMPOUND_FMT(fd); 217 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 218 219 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 220 "iv-len=%d data_off: 0x%x\n", 221 sym_op->aead.data.offset, 222 sym_op->aead.data.length, 223 sess->digest_length, 224 sess->iv.length, 225 sym_op->m_src->data_off); 226 227 /* Configure Output FLE with Scatter/Gather Entry */ 228 DPAA2_SET_FLE_SG_EXT(op_fle); 229 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 230 231 if (auth_only_len) 232 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 233 234 op_fle->length = (sess->dir == DIR_ENC) ? 235 (sym_op->aead.data.length + icv_len + auth_only_len) : 236 sym_op->aead.data.length + auth_only_len; 237 238 /* Configure Output SGE for Encap/Decap */ 239 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 240 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + 241 RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len); 242 sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len; 243 244 mbuf = mbuf->next; 245 /* o/p segs */ 246 while (mbuf) { 247 sge++; 248 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 249 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 250 sge->length = mbuf->data_len; 251 mbuf = mbuf->next; 252 } 253 sge->length -= icv_len; 254 255 if (sess->dir == DIR_ENC) { 256 sge++; 257 DPAA2_SET_FLE_ADDR(sge, 258 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 259 sge->length = icv_len; 260 } 261 DPAA2_SET_FLE_FIN(sge); 262 263 sge++; 264 mbuf = sym_op->m_src; 265 266 /* Configure Input FLE with Scatter/Gather Entry */ 267 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 268 DPAA2_SET_FLE_SG_EXT(ip_fle); 269 DPAA2_SET_FLE_FIN(ip_fle); 270 ip_fle->length = (sess->dir == DIR_ENC) ? 271 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 272 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 273 icv_len); 274 275 /* Configure Input SGE for Encap/Decap */ 276 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 277 sge->length = sess->iv.length; 278 279 sge++; 280 if (auth_only_len) { 281 DPAA2_SET_FLE_ADDR(sge, 282 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 283 sge->length = auth_only_len; 284 sge++; 285 } 286 287 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 288 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 289 mbuf->data_off); 290 sge->length = mbuf->data_len - sym_op->aead.data.offset; 291 292 mbuf = mbuf->next; 293 /* i/p segs */ 294 while (mbuf) { 295 sge++; 296 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 297 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 298 sge->length = mbuf->data_len; 299 mbuf = mbuf->next; 300 } 301 302 if (sess->dir == DIR_DEC) { 303 sge++; 304 old_icv = (uint8_t *)(sge + 1); 305 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 306 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 307 sge->length = icv_len; 308 } 309 310 DPAA2_SET_FLE_FIN(sge); 311 if (auth_only_len) { 312 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 313 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 314 } 315 DPAA2_SET_FD_LEN(fd, ip_fle->length); 316 317 return 0; 318 } 319 320 static inline int 321 build_authenc_gcm_fd(dpaa2_sec_session *sess, 322 struct rte_crypto_op *op, 323 struct qbman_fd *fd, uint16_t bpid) 324 { 325 struct rte_crypto_sym_op *sym_op = op->sym; 326 struct ctxt_priv *priv = sess->ctxt; 327 struct qbman_fle *fle, *sge; 328 struct sec_flow_context *flc; 329 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 330 int icv_len = sess->digest_length, retval; 331 uint8_t *old_icv; 332 struct rte_mbuf *dst; 333 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 334 sess->iv.offset); 335 336 PMD_INIT_FUNC_TRACE(); 337 338 if (sym_op->m_dst) 339 dst = sym_op->m_dst; 340 else 341 dst = sym_op->m_src; 342 343 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 344 * Currently we donot know which FLE has the mbuf stored. 345 * So while retreiving we can go back 1 FLE from the FD -ADDR 346 * to get the MBUF Addr from the previous FLE. 347 * We can have a better approach to use the inline Mbuf 348 */ 349 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 350 if (retval) { 351 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 352 return -1; 353 } 354 memset(fle, 0, FLE_POOL_BUF_SIZE); 355 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 356 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 357 fle = fle + 1; 358 sge = fle + 2; 359 if (likely(bpid < MAX_BPID)) { 360 DPAA2_SET_FD_BPID(fd, bpid); 361 DPAA2_SET_FLE_BPID(fle, bpid); 362 DPAA2_SET_FLE_BPID(fle + 1, bpid); 363 DPAA2_SET_FLE_BPID(sge, bpid); 364 DPAA2_SET_FLE_BPID(sge + 1, bpid); 365 DPAA2_SET_FLE_BPID(sge + 2, bpid); 366 DPAA2_SET_FLE_BPID(sge + 3, bpid); 367 } else { 368 DPAA2_SET_FD_IVP(fd); 369 DPAA2_SET_FLE_IVP(fle); 370 DPAA2_SET_FLE_IVP((fle + 1)); 371 DPAA2_SET_FLE_IVP(sge); 372 DPAA2_SET_FLE_IVP((sge + 1)); 373 DPAA2_SET_FLE_IVP((sge + 2)); 374 DPAA2_SET_FLE_IVP((sge + 3)); 375 } 376 377 /* Save the shared descriptor */ 378 flc = &priv->flc_desc[0].flc; 379 /* Configure FD as a FRAME LIST */ 380 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 381 DPAA2_SET_FD_COMPOUND_FMT(fd); 382 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 383 384 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 385 "iv-len=%d data_off: 0x%x\n", 386 sym_op->aead.data.offset, 387 sym_op->aead.data.length, 388 sess->digest_length, 389 sess->iv.length, 390 sym_op->m_src->data_off); 391 392 /* Configure Output FLE with Scatter/Gather Entry */ 393 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 394 if (auth_only_len) 395 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 396 fle->length = (sess->dir == DIR_ENC) ? 397 (sym_op->aead.data.length + icv_len + auth_only_len) : 398 sym_op->aead.data.length + auth_only_len; 399 400 DPAA2_SET_FLE_SG_EXT(fle); 401 402 /* Configure Output SGE for Encap/Decap */ 403 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 404 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + 405 RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len); 406 sge->length = sym_op->aead.data.length + auth_only_len; 407 408 if (sess->dir == DIR_ENC) { 409 sge++; 410 DPAA2_SET_FLE_ADDR(sge, 411 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 412 sge->length = sess->digest_length; 413 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 414 sess->iv.length + auth_only_len)); 415 } 416 DPAA2_SET_FLE_FIN(sge); 417 418 sge++; 419 fle++; 420 421 /* Configure Input FLE with Scatter/Gather Entry */ 422 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 423 DPAA2_SET_FLE_SG_EXT(fle); 424 DPAA2_SET_FLE_FIN(fle); 425 fle->length = (sess->dir == DIR_ENC) ? 426 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 427 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 428 sess->digest_length); 429 430 /* Configure Input SGE for Encap/Decap */ 431 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 432 sge->length = sess->iv.length; 433 sge++; 434 if (auth_only_len) { 435 DPAA2_SET_FLE_ADDR(sge, 436 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 437 sge->length = auth_only_len; 438 DPAA2_SET_FLE_BPID(sge, bpid); 439 sge++; 440 } 441 442 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 443 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 444 sym_op->m_src->data_off); 445 sge->length = sym_op->aead.data.length; 446 if (sess->dir == DIR_DEC) { 447 sge++; 448 old_icv = (uint8_t *)(sge + 1); 449 memcpy(old_icv, sym_op->aead.digest.data, 450 sess->digest_length); 451 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 452 sge->length = sess->digest_length; 453 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 454 sess->digest_length + 455 sess->iv.length + 456 auth_only_len)); 457 } 458 DPAA2_SET_FLE_FIN(sge); 459 460 if (auth_only_len) { 461 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 462 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 463 } 464 465 return 0; 466 } 467 468 static inline int 469 build_authenc_sg_fd(dpaa2_sec_session *sess, 470 struct rte_crypto_op *op, 471 struct qbman_fd *fd, __rte_unused uint16_t bpid) 472 { 473 struct rte_crypto_sym_op *sym_op = op->sym; 474 struct ctxt_priv *priv = sess->ctxt; 475 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 476 struct sec_flow_context *flc; 477 uint32_t auth_only_len = sym_op->auth.data.length - 478 sym_op->cipher.data.length; 479 int icv_len = sess->digest_length; 480 uint8_t *old_icv; 481 struct rte_mbuf *mbuf; 482 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 483 sess->iv.offset); 484 485 PMD_INIT_FUNC_TRACE(); 486 487 if (sym_op->m_dst) 488 mbuf = sym_op->m_dst; 489 else 490 mbuf = sym_op->m_src; 491 492 /* first FLE entry used to store mbuf and session ctxt */ 493 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 494 RTE_CACHE_LINE_SIZE); 495 if (unlikely(!fle)) { 496 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 497 return -1; 498 } 499 memset(fle, 0, FLE_SG_MEM_SIZE); 500 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 501 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 502 503 op_fle = fle + 1; 504 ip_fle = fle + 2; 505 sge = fle + 3; 506 507 /* Save the shared descriptor */ 508 flc = &priv->flc_desc[0].flc; 509 510 /* Configure FD as a FRAME LIST */ 511 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 512 DPAA2_SET_FD_COMPOUND_FMT(fd); 513 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 514 515 DPAA2_SEC_DP_DEBUG( 516 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 517 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 518 sym_op->auth.data.offset, 519 sym_op->auth.data.length, 520 sess->digest_length, 521 sym_op->cipher.data.offset, 522 sym_op->cipher.data.length, 523 sess->iv.length, 524 sym_op->m_src->data_off); 525 526 /* Configure Output FLE with Scatter/Gather Entry */ 527 DPAA2_SET_FLE_SG_EXT(op_fle); 528 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 529 530 if (auth_only_len) 531 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 532 533 op_fle->length = (sess->dir == DIR_ENC) ? 534 (sym_op->cipher.data.length + icv_len) : 535 sym_op->cipher.data.length; 536 537 /* Configure Output SGE for Encap/Decap */ 538 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 539 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 540 sge->length = mbuf->data_len - sym_op->auth.data.offset; 541 542 mbuf = mbuf->next; 543 /* o/p segs */ 544 while (mbuf) { 545 sge++; 546 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 547 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 548 sge->length = mbuf->data_len; 549 mbuf = mbuf->next; 550 } 551 sge->length -= icv_len; 552 553 if (sess->dir == DIR_ENC) { 554 sge++; 555 DPAA2_SET_FLE_ADDR(sge, 556 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 557 sge->length = icv_len; 558 } 559 DPAA2_SET_FLE_FIN(sge); 560 561 sge++; 562 mbuf = sym_op->m_src; 563 564 /* Configure Input FLE with Scatter/Gather Entry */ 565 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 566 DPAA2_SET_FLE_SG_EXT(ip_fle); 567 DPAA2_SET_FLE_FIN(ip_fle); 568 ip_fle->length = (sess->dir == DIR_ENC) ? 569 (sym_op->auth.data.length + sess->iv.length) : 570 (sym_op->auth.data.length + sess->iv.length + 571 icv_len); 572 573 /* Configure Input SGE for Encap/Decap */ 574 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 575 sge->length = sess->iv.length; 576 577 sge++; 578 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 579 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 580 mbuf->data_off); 581 sge->length = mbuf->data_len - sym_op->auth.data.offset; 582 583 mbuf = mbuf->next; 584 /* i/p segs */ 585 while (mbuf) { 586 sge++; 587 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 588 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 589 sge->length = mbuf->data_len; 590 mbuf = mbuf->next; 591 } 592 sge->length -= icv_len; 593 594 if (sess->dir == DIR_DEC) { 595 sge++; 596 old_icv = (uint8_t *)(sge + 1); 597 memcpy(old_icv, sym_op->auth.digest.data, 598 icv_len); 599 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 600 sge->length = icv_len; 601 } 602 603 DPAA2_SET_FLE_FIN(sge); 604 if (auth_only_len) { 605 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 606 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 607 } 608 DPAA2_SET_FD_LEN(fd, ip_fle->length); 609 610 return 0; 611 } 612 613 static inline int 614 build_authenc_fd(dpaa2_sec_session *sess, 615 struct rte_crypto_op *op, 616 struct qbman_fd *fd, uint16_t bpid) 617 { 618 struct rte_crypto_sym_op *sym_op = op->sym; 619 struct ctxt_priv *priv = sess->ctxt; 620 struct qbman_fle *fle, *sge; 621 struct sec_flow_context *flc; 622 uint32_t auth_only_len = sym_op->auth.data.length - 623 sym_op->cipher.data.length; 624 int icv_len = sess->digest_length, retval; 625 uint8_t *old_icv; 626 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 627 sess->iv.offset); 628 struct rte_mbuf *dst; 629 630 PMD_INIT_FUNC_TRACE(); 631 632 if (sym_op->m_dst) 633 dst = sym_op->m_dst; 634 else 635 dst = sym_op->m_src; 636 637 /* we are using the first FLE entry to store Mbuf. 638 * Currently we donot know which FLE has the mbuf stored. 639 * So while retreiving we can go back 1 FLE from the FD -ADDR 640 * to get the MBUF Addr from the previous FLE. 641 * We can have a better approach to use the inline Mbuf 642 */ 643 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 644 if (retval) { 645 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 646 return -1; 647 } 648 memset(fle, 0, FLE_POOL_BUF_SIZE); 649 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 650 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 651 fle = fle + 1; 652 sge = fle + 2; 653 if (likely(bpid < MAX_BPID)) { 654 DPAA2_SET_FD_BPID(fd, bpid); 655 DPAA2_SET_FLE_BPID(fle, bpid); 656 DPAA2_SET_FLE_BPID(fle + 1, bpid); 657 DPAA2_SET_FLE_BPID(sge, bpid); 658 DPAA2_SET_FLE_BPID(sge + 1, bpid); 659 DPAA2_SET_FLE_BPID(sge + 2, bpid); 660 DPAA2_SET_FLE_BPID(sge + 3, bpid); 661 } else { 662 DPAA2_SET_FD_IVP(fd); 663 DPAA2_SET_FLE_IVP(fle); 664 DPAA2_SET_FLE_IVP((fle + 1)); 665 DPAA2_SET_FLE_IVP(sge); 666 DPAA2_SET_FLE_IVP((sge + 1)); 667 DPAA2_SET_FLE_IVP((sge + 2)); 668 DPAA2_SET_FLE_IVP((sge + 3)); 669 } 670 671 /* Save the shared descriptor */ 672 flc = &priv->flc_desc[0].flc; 673 /* Configure FD as a FRAME LIST */ 674 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 675 DPAA2_SET_FD_COMPOUND_FMT(fd); 676 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 677 678 DPAA2_SEC_DP_DEBUG( 679 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 680 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 681 sym_op->auth.data.offset, 682 sym_op->auth.data.length, 683 sess->digest_length, 684 sym_op->cipher.data.offset, 685 sym_op->cipher.data.length, 686 sess->iv.length, 687 sym_op->m_src->data_off); 688 689 /* Configure Output FLE with Scatter/Gather Entry */ 690 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 691 if (auth_only_len) 692 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 693 fle->length = (sess->dir == DIR_ENC) ? 694 (sym_op->cipher.data.length + icv_len) : 695 sym_op->cipher.data.length; 696 697 DPAA2_SET_FLE_SG_EXT(fle); 698 699 /* Configure Output SGE for Encap/Decap */ 700 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 701 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 702 dst->data_off); 703 sge->length = sym_op->cipher.data.length; 704 705 if (sess->dir == DIR_ENC) { 706 sge++; 707 DPAA2_SET_FLE_ADDR(sge, 708 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 709 sge->length = sess->digest_length; 710 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 711 sess->iv.length)); 712 } 713 DPAA2_SET_FLE_FIN(sge); 714 715 sge++; 716 fle++; 717 718 /* Configure Input FLE with Scatter/Gather Entry */ 719 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 720 DPAA2_SET_FLE_SG_EXT(fle); 721 DPAA2_SET_FLE_FIN(fle); 722 fle->length = (sess->dir == DIR_ENC) ? 723 (sym_op->auth.data.length + sess->iv.length) : 724 (sym_op->auth.data.length + sess->iv.length + 725 sess->digest_length); 726 727 /* Configure Input SGE for Encap/Decap */ 728 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 729 sge->length = sess->iv.length; 730 sge++; 731 732 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 733 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 734 sym_op->m_src->data_off); 735 sge->length = sym_op->auth.data.length; 736 if (sess->dir == DIR_DEC) { 737 sge++; 738 old_icv = (uint8_t *)(sge + 1); 739 memcpy(old_icv, sym_op->auth.digest.data, 740 sess->digest_length); 741 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 742 sge->length = sess->digest_length; 743 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 744 sess->digest_length + 745 sess->iv.length)); 746 } 747 DPAA2_SET_FLE_FIN(sge); 748 if (auth_only_len) { 749 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 750 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 751 } 752 return 0; 753 } 754 755 static inline int build_auth_sg_fd( 756 dpaa2_sec_session *sess, 757 struct rte_crypto_op *op, 758 struct qbman_fd *fd, 759 __rte_unused uint16_t bpid) 760 { 761 struct rte_crypto_sym_op *sym_op = op->sym; 762 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 763 struct sec_flow_context *flc; 764 struct ctxt_priv *priv = sess->ctxt; 765 uint8_t *old_digest; 766 struct rte_mbuf *mbuf; 767 768 PMD_INIT_FUNC_TRACE(); 769 770 mbuf = sym_op->m_src; 771 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 772 RTE_CACHE_LINE_SIZE); 773 if (unlikely(!fle)) { 774 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 775 return -1; 776 } 777 memset(fle, 0, FLE_SG_MEM_SIZE); 778 /* first FLE entry used to store mbuf and session ctxt */ 779 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 780 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 781 op_fle = fle + 1; 782 ip_fle = fle + 2; 783 sge = fle + 3; 784 785 flc = &priv->flc_desc[DESC_INITFINAL].flc; 786 /* sg FD */ 787 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 788 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 789 DPAA2_SET_FD_COMPOUND_FMT(fd); 790 791 /* o/p fle */ 792 DPAA2_SET_FLE_ADDR(op_fle, 793 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 794 op_fle->length = sess->digest_length; 795 796 /* i/p fle */ 797 DPAA2_SET_FLE_SG_EXT(ip_fle); 798 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 799 /* i/p 1st seg */ 800 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 801 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off); 802 sge->length = mbuf->data_len - sym_op->auth.data.offset; 803 804 /* i/p segs */ 805 mbuf = mbuf->next; 806 while (mbuf) { 807 sge++; 808 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 809 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 810 sge->length = mbuf->data_len; 811 mbuf = mbuf->next; 812 } 813 if (sess->dir == DIR_ENC) { 814 /* Digest calculation case */ 815 sge->length -= sess->digest_length; 816 ip_fle->length = sym_op->auth.data.length; 817 } else { 818 /* Digest verification case */ 819 sge++; 820 old_digest = (uint8_t *)(sge + 1); 821 rte_memcpy(old_digest, sym_op->auth.digest.data, 822 sess->digest_length); 823 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 824 sge->length = sess->digest_length; 825 ip_fle->length = sym_op->auth.data.length + 826 sess->digest_length; 827 } 828 DPAA2_SET_FLE_FIN(sge); 829 DPAA2_SET_FLE_FIN(ip_fle); 830 DPAA2_SET_FD_LEN(fd, ip_fle->length); 831 832 return 0; 833 } 834 835 static inline int 836 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 837 struct qbman_fd *fd, uint16_t bpid) 838 { 839 struct rte_crypto_sym_op *sym_op = op->sym; 840 struct qbman_fle *fle, *sge; 841 struct sec_flow_context *flc; 842 struct ctxt_priv *priv = sess->ctxt; 843 uint8_t *old_digest; 844 int retval; 845 846 PMD_INIT_FUNC_TRACE(); 847 848 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 849 if (retval) { 850 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 851 return -1; 852 } 853 memset(fle, 0, FLE_POOL_BUF_SIZE); 854 /* TODO we are using the first FLE entry to store Mbuf. 855 * Currently we donot know which FLE has the mbuf stored. 856 * So while retreiving we can go back 1 FLE from the FD -ADDR 857 * to get the MBUF Addr from the previous FLE. 858 * We can have a better approach to use the inline Mbuf 859 */ 860 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 861 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 862 fle = fle + 1; 863 864 if (likely(bpid < MAX_BPID)) { 865 DPAA2_SET_FD_BPID(fd, bpid); 866 DPAA2_SET_FLE_BPID(fle, bpid); 867 DPAA2_SET_FLE_BPID(fle + 1, bpid); 868 } else { 869 DPAA2_SET_FD_IVP(fd); 870 DPAA2_SET_FLE_IVP(fle); 871 DPAA2_SET_FLE_IVP((fle + 1)); 872 } 873 flc = &priv->flc_desc[DESC_INITFINAL].flc; 874 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 875 876 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 877 fle->length = sess->digest_length; 878 879 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 880 DPAA2_SET_FD_COMPOUND_FMT(fd); 881 fle++; 882 883 if (sess->dir == DIR_ENC) { 884 DPAA2_SET_FLE_ADDR(fle, 885 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 886 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset + 887 sym_op->m_src->data_off); 888 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length); 889 fle->length = sym_op->auth.data.length; 890 } else { 891 sge = fle + 2; 892 DPAA2_SET_FLE_SG_EXT(fle); 893 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 894 895 if (likely(bpid < MAX_BPID)) { 896 DPAA2_SET_FLE_BPID(sge, bpid); 897 DPAA2_SET_FLE_BPID(sge + 1, bpid); 898 } else { 899 DPAA2_SET_FLE_IVP(sge); 900 DPAA2_SET_FLE_IVP((sge + 1)); 901 } 902 DPAA2_SET_FLE_ADDR(sge, 903 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 904 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 905 sym_op->m_src->data_off); 906 907 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length + 908 sess->digest_length); 909 sge->length = sym_op->auth.data.length; 910 sge++; 911 old_digest = (uint8_t *)(sge + 1); 912 rte_memcpy(old_digest, sym_op->auth.digest.data, 913 sess->digest_length); 914 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 915 sge->length = sess->digest_length; 916 fle->length = sym_op->auth.data.length + 917 sess->digest_length; 918 DPAA2_SET_FLE_FIN(sge); 919 } 920 DPAA2_SET_FLE_FIN(fle); 921 922 return 0; 923 } 924 925 static int 926 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 927 struct qbman_fd *fd, __rte_unused uint16_t bpid) 928 { 929 struct rte_crypto_sym_op *sym_op = op->sym; 930 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 931 struct sec_flow_context *flc; 932 struct ctxt_priv *priv = sess->ctxt; 933 struct rte_mbuf *mbuf; 934 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 935 sess->iv.offset); 936 937 PMD_INIT_FUNC_TRACE(); 938 939 if (sym_op->m_dst) 940 mbuf = sym_op->m_dst; 941 else 942 mbuf = sym_op->m_src; 943 944 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 945 RTE_CACHE_LINE_SIZE); 946 if (!fle) { 947 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 948 return -1; 949 } 950 memset(fle, 0, FLE_SG_MEM_SIZE); 951 /* first FLE entry used to store mbuf and session ctxt */ 952 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 953 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 954 955 op_fle = fle + 1; 956 ip_fle = fle + 2; 957 sge = fle + 3; 958 959 flc = &priv->flc_desc[0].flc; 960 961 DPAA2_SEC_DP_DEBUG( 962 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 963 " data_off: 0x%x\n", 964 sym_op->cipher.data.offset, 965 sym_op->cipher.data.length, 966 sess->iv.length, 967 sym_op->m_src->data_off); 968 969 /* o/p fle */ 970 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 971 op_fle->length = sym_op->cipher.data.length; 972 DPAA2_SET_FLE_SG_EXT(op_fle); 973 974 /* o/p 1st seg */ 975 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 976 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off); 977 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 978 979 mbuf = mbuf->next; 980 /* o/p segs */ 981 while (mbuf) { 982 sge++; 983 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 984 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 985 sge->length = mbuf->data_len; 986 mbuf = mbuf->next; 987 } 988 DPAA2_SET_FLE_FIN(sge); 989 990 DPAA2_SEC_DP_DEBUG( 991 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 992 flc, fle, fle->addr_hi, fle->addr_lo, 993 fle->length); 994 995 /* i/p fle */ 996 mbuf = sym_op->m_src; 997 sge++; 998 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 999 ip_fle->length = sess->iv.length + sym_op->cipher.data.length; 1000 DPAA2_SET_FLE_SG_EXT(ip_fle); 1001 1002 /* i/p IV */ 1003 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1004 DPAA2_SET_FLE_OFFSET(sge, 0); 1005 sge->length = sess->iv.length; 1006 1007 sge++; 1008 1009 /* i/p 1st seg */ 1010 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1011 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 1012 mbuf->data_off); 1013 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 1014 1015 mbuf = mbuf->next; 1016 /* i/p segs */ 1017 while (mbuf) { 1018 sge++; 1019 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1020 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1021 sge->length = mbuf->data_len; 1022 mbuf = mbuf->next; 1023 } 1024 DPAA2_SET_FLE_FIN(sge); 1025 DPAA2_SET_FLE_FIN(ip_fle); 1026 1027 /* sg fd */ 1028 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1029 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1030 DPAA2_SET_FD_COMPOUND_FMT(fd); 1031 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1032 1033 DPAA2_SEC_DP_DEBUG( 1034 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1035 " off =%d, len =%d\n", 1036 DPAA2_GET_FD_ADDR(fd), 1037 DPAA2_GET_FD_BPID(fd), 1038 rte_dpaa2_bpid_info[bpid].meta_data_size, 1039 DPAA2_GET_FD_OFFSET(fd), 1040 DPAA2_GET_FD_LEN(fd)); 1041 return 0; 1042 } 1043 1044 static int 1045 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1046 struct qbman_fd *fd, uint16_t bpid) 1047 { 1048 struct rte_crypto_sym_op *sym_op = op->sym; 1049 struct qbman_fle *fle, *sge; 1050 int retval; 1051 struct sec_flow_context *flc; 1052 struct ctxt_priv *priv = sess->ctxt; 1053 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1054 sess->iv.offset); 1055 struct rte_mbuf *dst; 1056 1057 PMD_INIT_FUNC_TRACE(); 1058 1059 if (sym_op->m_dst) 1060 dst = sym_op->m_dst; 1061 else 1062 dst = sym_op->m_src; 1063 1064 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1065 if (retval) { 1066 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1067 return -1; 1068 } 1069 memset(fle, 0, FLE_POOL_BUF_SIZE); 1070 /* TODO we are using the first FLE entry to store Mbuf. 1071 * Currently we donot know which FLE has the mbuf stored. 1072 * So while retreiving we can go back 1 FLE from the FD -ADDR 1073 * to get the MBUF Addr from the previous FLE. 1074 * We can have a better approach to use the inline Mbuf 1075 */ 1076 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1077 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1078 fle = fle + 1; 1079 sge = fle + 2; 1080 1081 if (likely(bpid < MAX_BPID)) { 1082 DPAA2_SET_FD_BPID(fd, bpid); 1083 DPAA2_SET_FLE_BPID(fle, bpid); 1084 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1085 DPAA2_SET_FLE_BPID(sge, bpid); 1086 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1087 } else { 1088 DPAA2_SET_FD_IVP(fd); 1089 DPAA2_SET_FLE_IVP(fle); 1090 DPAA2_SET_FLE_IVP((fle + 1)); 1091 DPAA2_SET_FLE_IVP(sge); 1092 DPAA2_SET_FLE_IVP((sge + 1)); 1093 } 1094 1095 flc = &priv->flc_desc[0].flc; 1096 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1097 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length + 1098 sess->iv.length); 1099 DPAA2_SET_FD_COMPOUND_FMT(fd); 1100 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1101 1102 DPAA2_SEC_DP_DEBUG( 1103 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1104 " data_off: 0x%x\n", 1105 sym_op->cipher.data.offset, 1106 sym_op->cipher.data.length, 1107 sess->iv.length, 1108 sym_op->m_src->data_off); 1109 1110 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1111 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + 1112 dst->data_off); 1113 1114 fle->length = sym_op->cipher.data.length + sess->iv.length; 1115 1116 DPAA2_SEC_DP_DEBUG( 1117 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1118 flc, fle, fle->addr_hi, fle->addr_lo, 1119 fle->length); 1120 1121 fle++; 1122 1123 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1124 fle->length = sym_op->cipher.data.length + sess->iv.length; 1125 1126 DPAA2_SET_FLE_SG_EXT(fle); 1127 1128 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1129 sge->length = sess->iv.length; 1130 1131 sge++; 1132 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1133 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 1134 sym_op->m_src->data_off); 1135 1136 sge->length = sym_op->cipher.data.length; 1137 DPAA2_SET_FLE_FIN(sge); 1138 DPAA2_SET_FLE_FIN(fle); 1139 1140 DPAA2_SEC_DP_DEBUG( 1141 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1142 " off =%d, len =%d\n", 1143 DPAA2_GET_FD_ADDR(fd), 1144 DPAA2_GET_FD_BPID(fd), 1145 rte_dpaa2_bpid_info[bpid].meta_data_size, 1146 DPAA2_GET_FD_OFFSET(fd), 1147 DPAA2_GET_FD_LEN(fd)); 1148 1149 return 0; 1150 } 1151 1152 static inline int 1153 build_sec_fd(struct rte_crypto_op *op, 1154 struct qbman_fd *fd, uint16_t bpid) 1155 { 1156 int ret = -1; 1157 dpaa2_sec_session *sess; 1158 1159 PMD_INIT_FUNC_TRACE(); 1160 1161 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1162 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1163 op->sym->session, cryptodev_driver_id); 1164 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1165 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1166 op->sym->sec_session); 1167 else 1168 return -1; 1169 1170 /* Segmented buffer */ 1171 if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) { 1172 switch (sess->ctxt_type) { 1173 case DPAA2_SEC_CIPHER: 1174 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1175 break; 1176 case DPAA2_SEC_AUTH: 1177 ret = build_auth_sg_fd(sess, op, fd, bpid); 1178 break; 1179 case DPAA2_SEC_AEAD: 1180 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1181 break; 1182 case DPAA2_SEC_CIPHER_HASH: 1183 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1184 break; 1185 case DPAA2_SEC_HASH_CIPHER: 1186 default: 1187 DPAA2_SEC_ERR("error: Unsupported session"); 1188 } 1189 } else { 1190 switch (sess->ctxt_type) { 1191 case DPAA2_SEC_CIPHER: 1192 ret = build_cipher_fd(sess, op, fd, bpid); 1193 break; 1194 case DPAA2_SEC_AUTH: 1195 ret = build_auth_fd(sess, op, fd, bpid); 1196 break; 1197 case DPAA2_SEC_AEAD: 1198 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1199 break; 1200 case DPAA2_SEC_CIPHER_HASH: 1201 ret = build_authenc_fd(sess, op, fd, bpid); 1202 break; 1203 case DPAA2_SEC_IPSEC: 1204 ret = build_proto_fd(sess, op, fd, bpid); 1205 break; 1206 case DPAA2_SEC_PDCP: 1207 ret = build_proto_compound_fd(sess, op, fd, bpid); 1208 break; 1209 case DPAA2_SEC_HASH_CIPHER: 1210 default: 1211 DPAA2_SEC_ERR("error: Unsupported session"); 1212 } 1213 } 1214 return ret; 1215 } 1216 1217 static uint16_t 1218 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1219 uint16_t nb_ops) 1220 { 1221 /* Function to transmit the frames to given device and VQ*/ 1222 uint32_t loop; 1223 int32_t ret; 1224 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1225 uint32_t frames_to_send; 1226 struct qbman_eq_desc eqdesc; 1227 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1228 struct qbman_swp *swp; 1229 uint16_t num_tx = 0; 1230 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1231 /*todo - need to support multiple buffer pools */ 1232 uint16_t bpid; 1233 struct rte_mempool *mb_pool; 1234 1235 if (unlikely(nb_ops == 0)) 1236 return 0; 1237 1238 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1239 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1240 return 0; 1241 } 1242 /*Prepare enqueue descriptor*/ 1243 qbman_eq_desc_clear(&eqdesc); 1244 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1245 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1246 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1247 1248 if (!DPAA2_PER_LCORE_DPIO) { 1249 ret = dpaa2_affine_qbman_swp(); 1250 if (ret) { 1251 DPAA2_SEC_ERR("Failure in affining portal"); 1252 return 0; 1253 } 1254 } 1255 swp = DPAA2_PER_LCORE_PORTAL; 1256 1257 while (nb_ops) { 1258 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1259 dpaa2_eqcr_size : nb_ops; 1260 1261 for (loop = 0; loop < frames_to_send; loop++) { 1262 if ((*ops)->sym->m_src->seqn) { 1263 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1; 1264 1265 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1266 DPAA2_PER_LCORE_DQRR_SIZE--; 1267 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1268 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN; 1269 } 1270 1271 /*Clear the unused FD fields before sending*/ 1272 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1273 mb_pool = (*ops)->sym->m_src->pool; 1274 bpid = mempool_to_bpid(mb_pool); 1275 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1276 if (ret) { 1277 DPAA2_SEC_ERR("error: Improper packet contents" 1278 " for crypto operation"); 1279 goto skip_tx; 1280 } 1281 ops++; 1282 } 1283 loop = 0; 1284 while (loop < frames_to_send) { 1285 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 1286 &fd_arr[loop], 1287 &flags[loop], 1288 frames_to_send - loop); 1289 } 1290 1291 num_tx += frames_to_send; 1292 nb_ops -= frames_to_send; 1293 } 1294 skip_tx: 1295 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1296 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1297 return num_tx; 1298 } 1299 1300 static inline struct rte_crypto_op * 1301 sec_simple_fd_to_mbuf(const struct qbman_fd *fd) 1302 { 1303 struct rte_crypto_op *op; 1304 uint16_t len = DPAA2_GET_FD_LEN(fd); 1305 uint16_t diff = 0; 1306 dpaa2_sec_session *sess_priv; 1307 1308 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1309 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1310 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1311 1312 diff = len - mbuf->pkt_len; 1313 mbuf->pkt_len += diff; 1314 mbuf->data_len += diff; 1315 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1316 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1317 op->sym->aead.digest.phys_addr = 0L; 1318 1319 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1320 op->sym->sec_session); 1321 if (sess_priv->dir == DIR_ENC) 1322 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1323 else 1324 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1325 1326 return op; 1327 } 1328 1329 static inline struct rte_crypto_op * 1330 sec_fd_to_mbuf(const struct qbman_fd *fd) 1331 { 1332 struct qbman_fle *fle; 1333 struct rte_crypto_op *op; 1334 struct ctxt_priv *priv; 1335 struct rte_mbuf *dst, *src; 1336 1337 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1338 return sec_simple_fd_to_mbuf(fd); 1339 1340 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1341 1342 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1343 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1344 1345 /* we are using the first FLE entry to store Mbuf. 1346 * Currently we donot know which FLE has the mbuf stored. 1347 * So while retreiving we can go back 1 FLE from the FD -ADDR 1348 * to get the MBUF Addr from the previous FLE. 1349 * We can have a better approach to use the inline Mbuf 1350 */ 1351 1352 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1353 /* TODO complete it. */ 1354 DPAA2_SEC_ERR("error: non inline buffer"); 1355 return NULL; 1356 } 1357 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1358 1359 /* Prefeth op */ 1360 src = op->sym->m_src; 1361 rte_prefetch0(src); 1362 1363 if (op->sym->m_dst) { 1364 dst = op->sym->m_dst; 1365 rte_prefetch0(dst); 1366 } else 1367 dst = src; 1368 1369 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1370 dpaa2_sec_session *sess = (dpaa2_sec_session *) 1371 get_sec_session_private_data(op->sym->sec_session); 1372 if (sess->ctxt_type == DPAA2_SEC_IPSEC) { 1373 uint16_t len = DPAA2_GET_FD_LEN(fd); 1374 dst->pkt_len = len; 1375 dst->data_len = len; 1376 } 1377 } 1378 1379 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1380 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1381 (void *)dst, 1382 dst->buf_addr, 1383 DPAA2_GET_FD_ADDR(fd), 1384 DPAA2_GET_FD_BPID(fd), 1385 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1386 DPAA2_GET_FD_OFFSET(fd), 1387 DPAA2_GET_FD_LEN(fd)); 1388 1389 /* free the fle memory */ 1390 if (likely(rte_pktmbuf_is_contiguous(src))) { 1391 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1392 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1393 } else 1394 rte_free((void *)(fle-1)); 1395 1396 return op; 1397 } 1398 1399 static uint16_t 1400 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1401 uint16_t nb_ops) 1402 { 1403 /* Function is responsible to receive frames for a given device and VQ*/ 1404 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1405 struct qbman_result *dq_storage; 1406 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1407 int ret, num_rx = 0; 1408 uint8_t is_last = 0, status; 1409 struct qbman_swp *swp; 1410 const struct qbman_fd *fd; 1411 struct qbman_pull_desc pulldesc; 1412 1413 if (!DPAA2_PER_LCORE_DPIO) { 1414 ret = dpaa2_affine_qbman_swp(); 1415 if (ret) { 1416 DPAA2_SEC_ERR("Failure in affining portal"); 1417 return 0; 1418 } 1419 } 1420 swp = DPAA2_PER_LCORE_PORTAL; 1421 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1422 1423 qbman_pull_desc_clear(&pulldesc); 1424 qbman_pull_desc_set_numframes(&pulldesc, 1425 (nb_ops > dpaa2_dqrr_size) ? 1426 dpaa2_dqrr_size : nb_ops); 1427 qbman_pull_desc_set_fq(&pulldesc, fqid); 1428 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1429 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1430 1); 1431 1432 /*Issue a volatile dequeue command. */ 1433 while (1) { 1434 if (qbman_swp_pull(swp, &pulldesc)) { 1435 DPAA2_SEC_WARN( 1436 "SEC VDQ command is not issued : QBMAN busy"); 1437 /* Portal was busy, try again */ 1438 continue; 1439 } 1440 break; 1441 }; 1442 1443 /* Receive the packets till Last Dequeue entry is found with 1444 * respect to the above issues PULL command. 1445 */ 1446 while (!is_last) { 1447 /* Check if the previous issued command is completed. 1448 * Also seems like the SWP is shared between the Ethernet Driver 1449 * and the SEC driver. 1450 */ 1451 while (!qbman_check_command_complete(dq_storage)) 1452 ; 1453 1454 /* Loop until the dq_storage is updated with 1455 * new token by QBMAN 1456 */ 1457 while (!qbman_check_new_result(dq_storage)) 1458 ; 1459 /* Check whether Last Pull command is Expired and 1460 * setting Condition for Loop termination 1461 */ 1462 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1463 is_last = 1; 1464 /* Check for valid frame. */ 1465 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1466 if (unlikely( 1467 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1468 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1469 continue; 1470 } 1471 } 1472 1473 fd = qbman_result_DQ_fd(dq_storage); 1474 ops[num_rx] = sec_fd_to_mbuf(fd); 1475 1476 if (unlikely(fd->simple.frc)) { 1477 /* TODO Parse SEC errors */ 1478 DPAA2_SEC_ERR("SEC returned Error - %x", 1479 fd->simple.frc); 1480 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1481 } else { 1482 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1483 } 1484 1485 num_rx++; 1486 dq_storage++; 1487 } /* End of Packet Rx loop */ 1488 1489 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1490 1491 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1492 /*Return the total number of packets received to DPAA2 app*/ 1493 return num_rx; 1494 } 1495 1496 /** Release queue pair */ 1497 static int 1498 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1499 { 1500 struct dpaa2_sec_qp *qp = 1501 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1502 1503 PMD_INIT_FUNC_TRACE(); 1504 1505 if (qp->rx_vq.q_storage) { 1506 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1507 rte_free(qp->rx_vq.q_storage); 1508 } 1509 rte_free(qp); 1510 1511 dev->data->queue_pairs[queue_pair_id] = NULL; 1512 1513 return 0; 1514 } 1515 1516 /** Setup a queue pair */ 1517 static int 1518 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1519 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1520 __rte_unused int socket_id) 1521 { 1522 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1523 struct dpaa2_sec_qp *qp; 1524 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1525 struct dpseci_rx_queue_cfg cfg; 1526 int32_t retcode; 1527 1528 PMD_INIT_FUNC_TRACE(); 1529 1530 /* If qp is already in use free ring memory and qp metadata. */ 1531 if (dev->data->queue_pairs[qp_id] != NULL) { 1532 DPAA2_SEC_INFO("QP already setup"); 1533 return 0; 1534 } 1535 1536 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1537 dev, qp_id, qp_conf); 1538 1539 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1540 1541 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1542 RTE_CACHE_LINE_SIZE); 1543 if (!qp) { 1544 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1545 return -1; 1546 } 1547 1548 qp->rx_vq.crypto_data = dev->data; 1549 qp->tx_vq.crypto_data = dev->data; 1550 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1551 sizeof(struct queue_storage_info_t), 1552 RTE_CACHE_LINE_SIZE); 1553 if (!qp->rx_vq.q_storage) { 1554 DPAA2_SEC_ERR("malloc failed for q_storage"); 1555 return -1; 1556 } 1557 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1558 1559 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1560 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1561 return -1; 1562 } 1563 1564 dev->data->queue_pairs[qp_id] = qp; 1565 1566 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1567 cfg.user_ctx = (size_t)(&qp->rx_vq); 1568 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1569 qp_id, &cfg); 1570 return retcode; 1571 } 1572 1573 /** Return the number of allocated queue pairs */ 1574 static uint32_t 1575 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) 1576 { 1577 PMD_INIT_FUNC_TRACE(); 1578 1579 return dev->data->nb_queue_pairs; 1580 } 1581 1582 /** Returns the size of the aesni gcm session structure */ 1583 static unsigned int 1584 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1585 { 1586 PMD_INIT_FUNC_TRACE(); 1587 1588 return sizeof(dpaa2_sec_session); 1589 } 1590 1591 static int 1592 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1593 struct rte_crypto_sym_xform *xform, 1594 dpaa2_sec_session *session) 1595 { 1596 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1597 struct alginfo cipherdata; 1598 int bufsize, i; 1599 struct ctxt_priv *priv; 1600 struct sec_flow_context *flc; 1601 1602 PMD_INIT_FUNC_TRACE(); 1603 1604 /* For SEC CIPHER only one descriptor is required. */ 1605 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1606 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1607 RTE_CACHE_LINE_SIZE); 1608 if (priv == NULL) { 1609 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1610 return -1; 1611 } 1612 1613 priv->fle_pool = dev_priv->fle_pool; 1614 1615 flc = &priv->flc_desc[0].flc; 1616 1617 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1618 RTE_CACHE_LINE_SIZE); 1619 if (session->cipher_key.data == NULL) { 1620 DPAA2_SEC_ERR("No Memory for cipher key"); 1621 rte_free(priv); 1622 return -1; 1623 } 1624 session->cipher_key.length = xform->cipher.key.length; 1625 1626 memcpy(session->cipher_key.data, xform->cipher.key.data, 1627 xform->cipher.key.length); 1628 cipherdata.key = (size_t)session->cipher_key.data; 1629 cipherdata.keylen = session->cipher_key.length; 1630 cipherdata.key_enc_flags = 0; 1631 cipherdata.key_type = RTA_DATA_IMM; 1632 1633 /* Set IV parameters */ 1634 session->iv.offset = xform->cipher.iv.offset; 1635 session->iv.length = xform->cipher.iv.length; 1636 1637 switch (xform->cipher.algo) { 1638 case RTE_CRYPTO_CIPHER_AES_CBC: 1639 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1640 cipherdata.algmode = OP_ALG_AAI_CBC; 1641 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1642 break; 1643 case RTE_CRYPTO_CIPHER_3DES_CBC: 1644 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1645 cipherdata.algmode = OP_ALG_AAI_CBC; 1646 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1647 break; 1648 case RTE_CRYPTO_CIPHER_AES_CTR: 1649 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1650 cipherdata.algmode = OP_ALG_AAI_CTR; 1651 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1652 break; 1653 case RTE_CRYPTO_CIPHER_3DES_CTR: 1654 case RTE_CRYPTO_CIPHER_AES_ECB: 1655 case RTE_CRYPTO_CIPHER_3DES_ECB: 1656 case RTE_CRYPTO_CIPHER_AES_XTS: 1657 case RTE_CRYPTO_CIPHER_AES_F8: 1658 case RTE_CRYPTO_CIPHER_ARC4: 1659 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1660 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1661 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1662 case RTE_CRYPTO_CIPHER_NULL: 1663 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1664 xform->cipher.algo); 1665 goto error_out; 1666 default: 1667 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1668 xform->cipher.algo); 1669 goto error_out; 1670 } 1671 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1672 DIR_ENC : DIR_DEC; 1673 1674 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 1675 &cipherdata, NULL, session->iv.length, 1676 session->dir); 1677 if (bufsize < 0) { 1678 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1679 goto error_out; 1680 } 1681 1682 flc->word1_sdl = (uint8_t)bufsize; 1683 session->ctxt = priv; 1684 1685 for (i = 0; i < bufsize; i++) 1686 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1687 1688 return 0; 1689 1690 error_out: 1691 rte_free(session->cipher_key.data); 1692 rte_free(priv); 1693 return -1; 1694 } 1695 1696 static int 1697 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1698 struct rte_crypto_sym_xform *xform, 1699 dpaa2_sec_session *session) 1700 { 1701 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1702 struct alginfo authdata; 1703 int bufsize, i; 1704 struct ctxt_priv *priv; 1705 struct sec_flow_context *flc; 1706 1707 PMD_INIT_FUNC_TRACE(); 1708 1709 /* For SEC AUTH three descriptors are required for various stages */ 1710 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1711 sizeof(struct ctxt_priv) + 3 * 1712 sizeof(struct sec_flc_desc), 1713 RTE_CACHE_LINE_SIZE); 1714 if (priv == NULL) { 1715 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1716 return -1; 1717 } 1718 1719 priv->fle_pool = dev_priv->fle_pool; 1720 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1721 1722 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1723 RTE_CACHE_LINE_SIZE); 1724 if (session->auth_key.data == NULL) { 1725 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1726 rte_free(priv); 1727 return -1; 1728 } 1729 session->auth_key.length = xform->auth.key.length; 1730 1731 memcpy(session->auth_key.data, xform->auth.key.data, 1732 xform->auth.key.length); 1733 authdata.key = (size_t)session->auth_key.data; 1734 authdata.keylen = session->auth_key.length; 1735 authdata.key_enc_flags = 0; 1736 authdata.key_type = RTA_DATA_IMM; 1737 1738 session->digest_length = xform->auth.digest_length; 1739 1740 switch (xform->auth.algo) { 1741 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1742 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1743 authdata.algmode = OP_ALG_AAI_HMAC; 1744 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1745 break; 1746 case RTE_CRYPTO_AUTH_MD5_HMAC: 1747 authdata.algtype = OP_ALG_ALGSEL_MD5; 1748 authdata.algmode = OP_ALG_AAI_HMAC; 1749 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1750 break; 1751 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1752 authdata.algtype = OP_ALG_ALGSEL_SHA256; 1753 authdata.algmode = OP_ALG_AAI_HMAC; 1754 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1755 break; 1756 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1757 authdata.algtype = OP_ALG_ALGSEL_SHA384; 1758 authdata.algmode = OP_ALG_AAI_HMAC; 1759 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1760 break; 1761 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1762 authdata.algtype = OP_ALG_ALGSEL_SHA512; 1763 authdata.algmode = OP_ALG_AAI_HMAC; 1764 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1765 break; 1766 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1767 authdata.algtype = OP_ALG_ALGSEL_SHA224; 1768 authdata.algmode = OP_ALG_AAI_HMAC; 1769 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 1770 break; 1771 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1772 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1773 case RTE_CRYPTO_AUTH_NULL: 1774 case RTE_CRYPTO_AUTH_SHA1: 1775 case RTE_CRYPTO_AUTH_SHA256: 1776 case RTE_CRYPTO_AUTH_SHA512: 1777 case RTE_CRYPTO_AUTH_SHA224: 1778 case RTE_CRYPTO_AUTH_SHA384: 1779 case RTE_CRYPTO_AUTH_MD5: 1780 case RTE_CRYPTO_AUTH_AES_GMAC: 1781 case RTE_CRYPTO_AUTH_KASUMI_F9: 1782 case RTE_CRYPTO_AUTH_AES_CMAC: 1783 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1784 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1785 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 1786 xform->auth.algo); 1787 goto error_out; 1788 default: 1789 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 1790 xform->auth.algo); 1791 goto error_out; 1792 } 1793 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1794 DIR_ENC : DIR_DEC; 1795 1796 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1797 1, 0, SHR_NEVER, &authdata, !session->dir, 1798 session->digest_length); 1799 if (bufsize < 0) { 1800 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1801 goto error_out; 1802 } 1803 1804 flc->word1_sdl = (uint8_t)bufsize; 1805 session->ctxt = priv; 1806 for (i = 0; i < bufsize; i++) 1807 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 1808 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 1809 1810 1811 return 0; 1812 1813 error_out: 1814 rte_free(session->auth_key.data); 1815 rte_free(priv); 1816 return -1; 1817 } 1818 1819 static int 1820 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 1821 struct rte_crypto_sym_xform *xform, 1822 dpaa2_sec_session *session) 1823 { 1824 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1825 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1826 struct alginfo aeaddata; 1827 int bufsize, i; 1828 struct ctxt_priv *priv; 1829 struct sec_flow_context *flc; 1830 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 1831 int err; 1832 1833 PMD_INIT_FUNC_TRACE(); 1834 1835 /* Set IV parameters */ 1836 session->iv.offset = aead_xform->iv.offset; 1837 session->iv.length = aead_xform->iv.length; 1838 session->ctxt_type = DPAA2_SEC_AEAD; 1839 1840 /* For SEC AEAD only one descriptor is required */ 1841 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1842 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1843 RTE_CACHE_LINE_SIZE); 1844 if (priv == NULL) { 1845 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1846 return -1; 1847 } 1848 1849 priv->fle_pool = dev_priv->fle_pool; 1850 flc = &priv->flc_desc[0].flc; 1851 1852 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 1853 RTE_CACHE_LINE_SIZE); 1854 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 1855 DPAA2_SEC_ERR("No Memory for aead key"); 1856 rte_free(priv); 1857 return -1; 1858 } 1859 memcpy(session->aead_key.data, aead_xform->key.data, 1860 aead_xform->key.length); 1861 1862 session->digest_length = aead_xform->digest_length; 1863 session->aead_key.length = aead_xform->key.length; 1864 ctxt->auth_only_len = aead_xform->aad_length; 1865 1866 aeaddata.key = (size_t)session->aead_key.data; 1867 aeaddata.keylen = session->aead_key.length; 1868 aeaddata.key_enc_flags = 0; 1869 aeaddata.key_type = RTA_DATA_IMM; 1870 1871 switch (aead_xform->algo) { 1872 case RTE_CRYPTO_AEAD_AES_GCM: 1873 aeaddata.algtype = OP_ALG_ALGSEL_AES; 1874 aeaddata.algmode = OP_ALG_AAI_GCM; 1875 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 1876 break; 1877 case RTE_CRYPTO_AEAD_AES_CCM: 1878 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 1879 aead_xform->algo); 1880 goto error_out; 1881 default: 1882 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 1883 aead_xform->algo); 1884 goto error_out; 1885 } 1886 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1887 DIR_ENC : DIR_DEC; 1888 1889 priv->flc_desc[0].desc[0] = aeaddata.keylen; 1890 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1891 MIN_JOB_DESC_SIZE, 1892 (unsigned int *)priv->flc_desc[0].desc, 1893 &priv->flc_desc[0].desc[1], 1); 1894 1895 if (err < 0) { 1896 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 1897 goto error_out; 1898 } 1899 if (priv->flc_desc[0].desc[1] & 1) { 1900 aeaddata.key_type = RTA_DATA_IMM; 1901 } else { 1902 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 1903 aeaddata.key_type = RTA_DATA_PTR; 1904 } 1905 priv->flc_desc[0].desc[0] = 0; 1906 priv->flc_desc[0].desc[1] = 0; 1907 1908 if (session->dir == DIR_ENC) 1909 bufsize = cnstr_shdsc_gcm_encap( 1910 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 1911 &aeaddata, session->iv.length, 1912 session->digest_length); 1913 else 1914 bufsize = cnstr_shdsc_gcm_decap( 1915 priv->flc_desc[0].desc, 1, 0, SHR_NEVER, 1916 &aeaddata, session->iv.length, 1917 session->digest_length); 1918 if (bufsize < 0) { 1919 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1920 goto error_out; 1921 } 1922 1923 flc->word1_sdl = (uint8_t)bufsize; 1924 session->ctxt = priv; 1925 for (i = 0; i < bufsize; i++) 1926 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 1927 i, priv->flc_desc[0].desc[i]); 1928 1929 return 0; 1930 1931 error_out: 1932 rte_free(session->aead_key.data); 1933 rte_free(priv); 1934 return -1; 1935 } 1936 1937 1938 static int 1939 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 1940 struct rte_crypto_sym_xform *xform, 1941 dpaa2_sec_session *session) 1942 { 1943 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1944 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1945 struct alginfo authdata, cipherdata; 1946 int bufsize, i; 1947 struct ctxt_priv *priv; 1948 struct sec_flow_context *flc; 1949 struct rte_crypto_cipher_xform *cipher_xform; 1950 struct rte_crypto_auth_xform *auth_xform; 1951 int err; 1952 1953 PMD_INIT_FUNC_TRACE(); 1954 1955 if (session->ext_params.aead_ctxt.auth_cipher_text) { 1956 cipher_xform = &xform->cipher; 1957 auth_xform = &xform->next->auth; 1958 session->ctxt_type = 1959 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1960 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 1961 } else { 1962 cipher_xform = &xform->next->cipher; 1963 auth_xform = &xform->auth; 1964 session->ctxt_type = 1965 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1966 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 1967 } 1968 1969 /* Set IV parameters */ 1970 session->iv.offset = cipher_xform->iv.offset; 1971 session->iv.length = cipher_xform->iv.length; 1972 1973 /* For SEC AEAD only one descriptor is required */ 1974 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1975 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1976 RTE_CACHE_LINE_SIZE); 1977 if (priv == NULL) { 1978 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1979 return -1; 1980 } 1981 1982 priv->fle_pool = dev_priv->fle_pool; 1983 flc = &priv->flc_desc[0].flc; 1984 1985 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 1986 RTE_CACHE_LINE_SIZE); 1987 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 1988 DPAA2_SEC_ERR("No Memory for cipher key"); 1989 rte_free(priv); 1990 return -1; 1991 } 1992 session->cipher_key.length = cipher_xform->key.length; 1993 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 1994 RTE_CACHE_LINE_SIZE); 1995 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 1996 DPAA2_SEC_ERR("No Memory for auth key"); 1997 rte_free(session->cipher_key.data); 1998 rte_free(priv); 1999 return -1; 2000 } 2001 session->auth_key.length = auth_xform->key.length; 2002 memcpy(session->cipher_key.data, cipher_xform->key.data, 2003 cipher_xform->key.length); 2004 memcpy(session->auth_key.data, auth_xform->key.data, 2005 auth_xform->key.length); 2006 2007 authdata.key = (size_t)session->auth_key.data; 2008 authdata.keylen = session->auth_key.length; 2009 authdata.key_enc_flags = 0; 2010 authdata.key_type = RTA_DATA_IMM; 2011 2012 session->digest_length = auth_xform->digest_length; 2013 2014 switch (auth_xform->algo) { 2015 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2016 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2017 authdata.algmode = OP_ALG_AAI_HMAC; 2018 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2019 break; 2020 case RTE_CRYPTO_AUTH_MD5_HMAC: 2021 authdata.algtype = OP_ALG_ALGSEL_MD5; 2022 authdata.algmode = OP_ALG_AAI_HMAC; 2023 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2024 break; 2025 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2026 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2027 authdata.algmode = OP_ALG_AAI_HMAC; 2028 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2029 break; 2030 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2031 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2032 authdata.algmode = OP_ALG_AAI_HMAC; 2033 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2034 break; 2035 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2036 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2037 authdata.algmode = OP_ALG_AAI_HMAC; 2038 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2039 break; 2040 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2041 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2042 authdata.algmode = OP_ALG_AAI_HMAC; 2043 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2044 break; 2045 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2046 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2047 case RTE_CRYPTO_AUTH_NULL: 2048 case RTE_CRYPTO_AUTH_SHA1: 2049 case RTE_CRYPTO_AUTH_SHA256: 2050 case RTE_CRYPTO_AUTH_SHA512: 2051 case RTE_CRYPTO_AUTH_SHA224: 2052 case RTE_CRYPTO_AUTH_SHA384: 2053 case RTE_CRYPTO_AUTH_MD5: 2054 case RTE_CRYPTO_AUTH_AES_GMAC: 2055 case RTE_CRYPTO_AUTH_KASUMI_F9: 2056 case RTE_CRYPTO_AUTH_AES_CMAC: 2057 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2058 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2059 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2060 auth_xform->algo); 2061 goto error_out; 2062 default: 2063 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2064 auth_xform->algo); 2065 goto error_out; 2066 } 2067 cipherdata.key = (size_t)session->cipher_key.data; 2068 cipherdata.keylen = session->cipher_key.length; 2069 cipherdata.key_enc_flags = 0; 2070 cipherdata.key_type = RTA_DATA_IMM; 2071 2072 switch (cipher_xform->algo) { 2073 case RTE_CRYPTO_CIPHER_AES_CBC: 2074 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2075 cipherdata.algmode = OP_ALG_AAI_CBC; 2076 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2077 break; 2078 case RTE_CRYPTO_CIPHER_3DES_CBC: 2079 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2080 cipherdata.algmode = OP_ALG_AAI_CBC; 2081 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2082 break; 2083 case RTE_CRYPTO_CIPHER_AES_CTR: 2084 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2085 cipherdata.algmode = OP_ALG_AAI_CTR; 2086 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2087 break; 2088 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2089 case RTE_CRYPTO_CIPHER_NULL: 2090 case RTE_CRYPTO_CIPHER_3DES_ECB: 2091 case RTE_CRYPTO_CIPHER_AES_ECB: 2092 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2093 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2094 cipher_xform->algo); 2095 goto error_out; 2096 default: 2097 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2098 cipher_xform->algo); 2099 goto error_out; 2100 } 2101 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2102 DIR_ENC : DIR_DEC; 2103 2104 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2105 priv->flc_desc[0].desc[1] = authdata.keylen; 2106 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2107 MIN_JOB_DESC_SIZE, 2108 (unsigned int *)priv->flc_desc[0].desc, 2109 &priv->flc_desc[0].desc[2], 2); 2110 2111 if (err < 0) { 2112 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2113 goto error_out; 2114 } 2115 if (priv->flc_desc[0].desc[2] & 1) { 2116 cipherdata.key_type = RTA_DATA_IMM; 2117 } else { 2118 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2119 cipherdata.key_type = RTA_DATA_PTR; 2120 } 2121 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2122 authdata.key_type = RTA_DATA_IMM; 2123 } else { 2124 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2125 authdata.key_type = RTA_DATA_PTR; 2126 } 2127 priv->flc_desc[0].desc[0] = 0; 2128 priv->flc_desc[0].desc[1] = 0; 2129 priv->flc_desc[0].desc[2] = 0; 2130 2131 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2132 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2133 0, SHR_SERIAL, 2134 &cipherdata, &authdata, 2135 session->iv.length, 2136 ctxt->auth_only_len, 2137 session->digest_length, 2138 session->dir); 2139 if (bufsize < 0) { 2140 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2141 goto error_out; 2142 } 2143 } else { 2144 DPAA2_SEC_ERR("Hash before cipher not supported"); 2145 goto error_out; 2146 } 2147 2148 flc->word1_sdl = (uint8_t)bufsize; 2149 session->ctxt = priv; 2150 for (i = 0; i < bufsize; i++) 2151 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2152 i, priv->flc_desc[0].desc[i]); 2153 2154 return 0; 2155 2156 error_out: 2157 rte_free(session->cipher_key.data); 2158 rte_free(session->auth_key.data); 2159 rte_free(priv); 2160 return -1; 2161 } 2162 2163 static int 2164 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2165 struct rte_crypto_sym_xform *xform, void *sess) 2166 { 2167 dpaa2_sec_session *session = sess; 2168 2169 PMD_INIT_FUNC_TRACE(); 2170 2171 if (unlikely(sess == NULL)) { 2172 DPAA2_SEC_ERR("Invalid session struct"); 2173 return -1; 2174 } 2175 2176 memset(session, 0, sizeof(dpaa2_sec_session)); 2177 /* Default IV length = 0 */ 2178 session->iv.length = 0; 2179 2180 /* Cipher Only */ 2181 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2182 session->ctxt_type = DPAA2_SEC_CIPHER; 2183 dpaa2_sec_cipher_init(dev, xform, session); 2184 2185 /* Authentication Only */ 2186 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2187 xform->next == NULL) { 2188 session->ctxt_type = DPAA2_SEC_AUTH; 2189 dpaa2_sec_auth_init(dev, xform, session); 2190 2191 /* Cipher then Authenticate */ 2192 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2193 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2194 session->ext_params.aead_ctxt.auth_cipher_text = true; 2195 dpaa2_sec_aead_chain_init(dev, xform, session); 2196 2197 /* Authenticate then Cipher */ 2198 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2199 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2200 session->ext_params.aead_ctxt.auth_cipher_text = false; 2201 dpaa2_sec_aead_chain_init(dev, xform, session); 2202 2203 /* AEAD operation for AES-GCM kind of Algorithms */ 2204 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2205 xform->next == NULL) { 2206 dpaa2_sec_aead_init(dev, xform, session); 2207 2208 } else { 2209 DPAA2_SEC_ERR("Invalid crypto type"); 2210 return -EINVAL; 2211 } 2212 2213 return 0; 2214 } 2215 2216 static int 2217 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2218 dpaa2_sec_session *session, 2219 struct alginfo *aeaddata) 2220 { 2221 PMD_INIT_FUNC_TRACE(); 2222 2223 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2224 RTE_CACHE_LINE_SIZE); 2225 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2226 DPAA2_SEC_ERR("No Memory for aead key"); 2227 return -1; 2228 } 2229 memcpy(session->aead_key.data, aead_xform->key.data, 2230 aead_xform->key.length); 2231 2232 session->digest_length = aead_xform->digest_length; 2233 session->aead_key.length = aead_xform->key.length; 2234 2235 aeaddata->key = (size_t)session->aead_key.data; 2236 aeaddata->keylen = session->aead_key.length; 2237 aeaddata->key_enc_flags = 0; 2238 aeaddata->key_type = RTA_DATA_IMM; 2239 2240 switch (aead_xform->algo) { 2241 case RTE_CRYPTO_AEAD_AES_GCM: 2242 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2243 aeaddata->algmode = OP_ALG_AAI_GCM; 2244 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2245 break; 2246 case RTE_CRYPTO_AEAD_AES_CCM: 2247 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2248 aeaddata->algmode = OP_ALG_AAI_CCM; 2249 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2250 break; 2251 default: 2252 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2253 aead_xform->algo); 2254 return -1; 2255 } 2256 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2257 DIR_ENC : DIR_DEC; 2258 2259 return 0; 2260 } 2261 2262 static int 2263 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2264 struct rte_crypto_auth_xform *auth_xform, 2265 dpaa2_sec_session *session, 2266 struct alginfo *cipherdata, 2267 struct alginfo *authdata) 2268 { 2269 if (cipher_xform) { 2270 session->cipher_key.data = rte_zmalloc(NULL, 2271 cipher_xform->key.length, 2272 RTE_CACHE_LINE_SIZE); 2273 if (session->cipher_key.data == NULL && 2274 cipher_xform->key.length > 0) { 2275 DPAA2_SEC_ERR("No Memory for cipher key"); 2276 return -ENOMEM; 2277 } 2278 2279 session->cipher_key.length = cipher_xform->key.length; 2280 memcpy(session->cipher_key.data, cipher_xform->key.data, 2281 cipher_xform->key.length); 2282 session->cipher_alg = cipher_xform->algo; 2283 } else { 2284 session->cipher_key.data = NULL; 2285 session->cipher_key.length = 0; 2286 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2287 } 2288 2289 if (auth_xform) { 2290 session->auth_key.data = rte_zmalloc(NULL, 2291 auth_xform->key.length, 2292 RTE_CACHE_LINE_SIZE); 2293 if (session->auth_key.data == NULL && 2294 auth_xform->key.length > 0) { 2295 DPAA2_SEC_ERR("No Memory for auth key"); 2296 return -ENOMEM; 2297 } 2298 session->auth_key.length = auth_xform->key.length; 2299 memcpy(session->auth_key.data, auth_xform->key.data, 2300 auth_xform->key.length); 2301 session->auth_alg = auth_xform->algo; 2302 } else { 2303 session->auth_key.data = NULL; 2304 session->auth_key.length = 0; 2305 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2306 } 2307 2308 authdata->key = (size_t)session->auth_key.data; 2309 authdata->keylen = session->auth_key.length; 2310 authdata->key_enc_flags = 0; 2311 authdata->key_type = RTA_DATA_IMM; 2312 switch (session->auth_alg) { 2313 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2314 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2315 authdata->algmode = OP_ALG_AAI_HMAC; 2316 break; 2317 case RTE_CRYPTO_AUTH_MD5_HMAC: 2318 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2319 authdata->algmode = OP_ALG_AAI_HMAC; 2320 break; 2321 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2322 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2323 authdata->algmode = OP_ALG_AAI_HMAC; 2324 break; 2325 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2326 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2327 authdata->algmode = OP_ALG_AAI_HMAC; 2328 break; 2329 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2330 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2331 authdata->algmode = OP_ALG_AAI_HMAC; 2332 break; 2333 case RTE_CRYPTO_AUTH_AES_CMAC: 2334 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2335 break; 2336 case RTE_CRYPTO_AUTH_NULL: 2337 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2338 break; 2339 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2340 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2341 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2342 case RTE_CRYPTO_AUTH_SHA1: 2343 case RTE_CRYPTO_AUTH_SHA256: 2344 case RTE_CRYPTO_AUTH_SHA512: 2345 case RTE_CRYPTO_AUTH_SHA224: 2346 case RTE_CRYPTO_AUTH_SHA384: 2347 case RTE_CRYPTO_AUTH_MD5: 2348 case RTE_CRYPTO_AUTH_AES_GMAC: 2349 case RTE_CRYPTO_AUTH_KASUMI_F9: 2350 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2351 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2352 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2353 session->auth_alg); 2354 return -1; 2355 default: 2356 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2357 session->auth_alg); 2358 return -1; 2359 } 2360 cipherdata->key = (size_t)session->cipher_key.data; 2361 cipherdata->keylen = session->cipher_key.length; 2362 cipherdata->key_enc_flags = 0; 2363 cipherdata->key_type = RTA_DATA_IMM; 2364 2365 switch (session->cipher_alg) { 2366 case RTE_CRYPTO_CIPHER_AES_CBC: 2367 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2368 cipherdata->algmode = OP_ALG_AAI_CBC; 2369 break; 2370 case RTE_CRYPTO_CIPHER_3DES_CBC: 2371 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2372 cipherdata->algmode = OP_ALG_AAI_CBC; 2373 break; 2374 case RTE_CRYPTO_CIPHER_AES_CTR: 2375 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2376 cipherdata->algmode = OP_ALG_AAI_CTR; 2377 break; 2378 case RTE_CRYPTO_CIPHER_NULL: 2379 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2380 break; 2381 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2382 case RTE_CRYPTO_CIPHER_3DES_ECB: 2383 case RTE_CRYPTO_CIPHER_AES_ECB: 2384 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2385 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2386 session->cipher_alg); 2387 return -1; 2388 default: 2389 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2390 session->cipher_alg); 2391 return -1; 2392 } 2393 2394 return 0; 2395 } 2396 2397 #ifdef RTE_LIBRTE_SECURITY_TEST 2398 static uint8_t aes_cbc_iv[] = { 2399 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2400 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2401 #endif 2402 2403 static int 2404 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2405 struct rte_security_session_conf *conf, 2406 void *sess) 2407 { 2408 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2409 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2410 struct rte_crypto_auth_xform *auth_xform = NULL; 2411 struct rte_crypto_aead_xform *aead_xform = NULL; 2412 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2413 struct ctxt_priv *priv; 2414 struct ipsec_encap_pdb encap_pdb; 2415 struct ipsec_decap_pdb decap_pdb; 2416 struct alginfo authdata, cipherdata; 2417 int bufsize; 2418 struct sec_flow_context *flc; 2419 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2420 int ret = -1; 2421 2422 PMD_INIT_FUNC_TRACE(); 2423 2424 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2425 sizeof(struct ctxt_priv) + 2426 sizeof(struct sec_flc_desc), 2427 RTE_CACHE_LINE_SIZE); 2428 2429 if (priv == NULL) { 2430 DPAA2_SEC_ERR("No memory for priv CTXT"); 2431 return -ENOMEM; 2432 } 2433 2434 priv->fle_pool = dev_priv->fle_pool; 2435 flc = &priv->flc_desc[0].flc; 2436 2437 memset(session, 0, sizeof(dpaa2_sec_session)); 2438 2439 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2440 cipher_xform = &conf->crypto_xform->cipher; 2441 if (conf->crypto_xform->next) 2442 auth_xform = &conf->crypto_xform->next->auth; 2443 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2444 session, &cipherdata, &authdata); 2445 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2446 auth_xform = &conf->crypto_xform->auth; 2447 if (conf->crypto_xform->next) 2448 cipher_xform = &conf->crypto_xform->next->cipher; 2449 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2450 session, &cipherdata, &authdata); 2451 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2452 aead_xform = &conf->crypto_xform->aead; 2453 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2454 session, &cipherdata); 2455 } else { 2456 DPAA2_SEC_ERR("XFORM not specified"); 2457 ret = -EINVAL; 2458 goto out; 2459 } 2460 if (ret) { 2461 DPAA2_SEC_ERR("Failed to process xform"); 2462 goto out; 2463 } 2464 2465 session->ctxt_type = DPAA2_SEC_IPSEC; 2466 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2467 struct ip ip4_hdr; 2468 2469 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2470 ip4_hdr.ip_v = IPVERSION; 2471 ip4_hdr.ip_hl = 5; 2472 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2473 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2474 ip4_hdr.ip_id = 0; 2475 ip4_hdr.ip_off = 0; 2476 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2477 ip4_hdr.ip_p = IPPROTO_ESP; 2478 ip4_hdr.ip_sum = 0; 2479 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2480 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2481 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr, 2482 sizeof(struct ip)); 2483 2484 /* For Sec Proto only one descriptor is required. */ 2485 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2486 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2487 PDBOPTS_ESP_OIHI_PDB_INL | 2488 PDBOPTS_ESP_IVSRC | 2489 PDBHMO_ESP_ENCAP_DTTL | 2490 PDBHMO_ESP_SNR; 2491 encap_pdb.spi = ipsec_xform->spi; 2492 encap_pdb.ip_hdr_len = sizeof(struct ip); 2493 2494 session->dir = DIR_ENC; 2495 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2496 1, 0, SHR_SERIAL, &encap_pdb, 2497 (uint8_t *)&ip4_hdr, 2498 &cipherdata, &authdata); 2499 } else if (ipsec_xform->direction == 2500 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2501 flc->dhr = SEC_FLC_DHR_INBOUND; 2502 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2503 decap_pdb.options = sizeof(struct ip) << 16; 2504 session->dir = DIR_DEC; 2505 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2506 1, 0, SHR_SERIAL, 2507 &decap_pdb, &cipherdata, &authdata); 2508 } else 2509 goto out; 2510 2511 if (bufsize < 0) { 2512 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2513 goto out; 2514 } 2515 2516 flc->word1_sdl = (uint8_t)bufsize; 2517 2518 /* Enable the stashing control bit */ 2519 DPAA2_SET_FLC_RSC(flc); 2520 flc->word2_rflc_31_0 = lower_32_bits( 2521 (size_t)&(((struct dpaa2_sec_qp *) 2522 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2523 flc->word3_rflc_63_32 = upper_32_bits( 2524 (size_t)&(((struct dpaa2_sec_qp *) 2525 dev->data->queue_pairs[0])->rx_vq)); 2526 2527 /* Set EWS bit i.e. enable write-safe */ 2528 DPAA2_SET_FLC_EWS(flc); 2529 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2530 DPAA2_SET_FLC_REUSE_BS(flc); 2531 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2532 DPAA2_SET_FLC_REUSE_FF(flc); 2533 2534 session->ctxt = priv; 2535 2536 return 0; 2537 out: 2538 rte_free(session->auth_key.data); 2539 rte_free(session->cipher_key.data); 2540 rte_free(priv); 2541 return ret; 2542 } 2543 2544 static int 2545 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2546 struct rte_security_session_conf *conf, 2547 void *sess) 2548 { 2549 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2550 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2551 struct rte_crypto_auth_xform *auth_xform = NULL; 2552 struct rte_crypto_cipher_xform *cipher_xform; 2553 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2554 struct ctxt_priv *priv; 2555 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2556 struct alginfo authdata, cipherdata; 2557 int bufsize = -1; 2558 struct sec_flow_context *flc; 2559 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2560 int swap = true; 2561 #else 2562 int swap = false; 2563 #endif 2564 2565 PMD_INIT_FUNC_TRACE(); 2566 2567 memset(session, 0, sizeof(dpaa2_sec_session)); 2568 2569 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2570 sizeof(struct ctxt_priv) + 2571 sizeof(struct sec_flc_desc), 2572 RTE_CACHE_LINE_SIZE); 2573 2574 if (priv == NULL) { 2575 DPAA2_SEC_ERR("No memory for priv CTXT"); 2576 return -ENOMEM; 2577 } 2578 2579 priv->fle_pool = dev_priv->fle_pool; 2580 flc = &priv->flc_desc[0].flc; 2581 2582 /* find xfrm types */ 2583 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2584 cipher_xform = &xform->cipher; 2585 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2586 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2587 session->ext_params.aead_ctxt.auth_cipher_text = true; 2588 cipher_xform = &xform->cipher; 2589 auth_xform = &xform->next->auth; 2590 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2591 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2592 session->ext_params.aead_ctxt.auth_cipher_text = false; 2593 cipher_xform = &xform->next->cipher; 2594 auth_xform = &xform->auth; 2595 } else { 2596 DPAA2_SEC_ERR("Invalid crypto type"); 2597 return -EINVAL; 2598 } 2599 2600 session->ctxt_type = DPAA2_SEC_PDCP; 2601 if (cipher_xform) { 2602 session->cipher_key.data = rte_zmalloc(NULL, 2603 cipher_xform->key.length, 2604 RTE_CACHE_LINE_SIZE); 2605 if (session->cipher_key.data == NULL && 2606 cipher_xform->key.length > 0) { 2607 DPAA2_SEC_ERR("No Memory for cipher key"); 2608 rte_free(priv); 2609 return -ENOMEM; 2610 } 2611 session->cipher_key.length = cipher_xform->key.length; 2612 memcpy(session->cipher_key.data, cipher_xform->key.data, 2613 cipher_xform->key.length); 2614 session->dir = 2615 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2616 DIR_ENC : DIR_DEC; 2617 session->cipher_alg = cipher_xform->algo; 2618 } else { 2619 session->cipher_key.data = NULL; 2620 session->cipher_key.length = 0; 2621 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2622 session->dir = DIR_ENC; 2623 } 2624 2625 session->pdcp.domain = pdcp_xform->domain; 2626 session->pdcp.bearer = pdcp_xform->bearer; 2627 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2628 session->pdcp.sn_size = pdcp_xform->sn_size; 2629 #ifdef ENABLE_HFN_OVERRIDE 2630 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd; 2631 #endif 2632 session->pdcp.hfn = pdcp_xform->hfn; 2633 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 2634 2635 cipherdata.key = (size_t)session->cipher_key.data; 2636 cipherdata.keylen = session->cipher_key.length; 2637 cipherdata.key_enc_flags = 0; 2638 cipherdata.key_type = RTA_DATA_IMM; 2639 2640 switch (session->cipher_alg) { 2641 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2642 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 2643 break; 2644 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2645 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 2646 break; 2647 case RTE_CRYPTO_CIPHER_AES_CTR: 2648 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 2649 break; 2650 case RTE_CRYPTO_CIPHER_NULL: 2651 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 2652 break; 2653 default: 2654 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2655 session->cipher_alg); 2656 goto out; 2657 } 2658 2659 /* Auth is only applicable for control mode operation. */ 2660 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2661 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) { 2662 DPAA2_SEC_ERR( 2663 "PDCP Seq Num size should be 5 bits for cmode"); 2664 goto out; 2665 } 2666 if (auth_xform) { 2667 session->auth_key.data = rte_zmalloc(NULL, 2668 auth_xform->key.length, 2669 RTE_CACHE_LINE_SIZE); 2670 if (session->auth_key.data == NULL && 2671 auth_xform->key.length > 0) { 2672 DPAA2_SEC_ERR("No Memory for auth key"); 2673 rte_free(session->cipher_key.data); 2674 rte_free(priv); 2675 return -ENOMEM; 2676 } 2677 session->auth_key.length = auth_xform->key.length; 2678 memcpy(session->auth_key.data, auth_xform->key.data, 2679 auth_xform->key.length); 2680 session->auth_alg = auth_xform->algo; 2681 } else { 2682 session->auth_key.data = NULL; 2683 session->auth_key.length = 0; 2684 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2685 } 2686 authdata.key = (size_t)session->auth_key.data; 2687 authdata.keylen = session->auth_key.length; 2688 authdata.key_enc_flags = 0; 2689 authdata.key_type = RTA_DATA_IMM; 2690 2691 switch (session->auth_alg) { 2692 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2693 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 2694 break; 2695 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2696 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 2697 break; 2698 case RTE_CRYPTO_AUTH_AES_CMAC: 2699 authdata.algtype = PDCP_AUTH_TYPE_AES; 2700 break; 2701 case RTE_CRYPTO_AUTH_NULL: 2702 authdata.algtype = PDCP_AUTH_TYPE_NULL; 2703 break; 2704 default: 2705 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2706 session->auth_alg); 2707 goto out; 2708 } 2709 2710 if (session->dir == DIR_ENC) 2711 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 2712 priv->flc_desc[0].desc, 1, swap, 2713 pdcp_xform->hfn, 2714 pdcp_xform->bearer, 2715 pdcp_xform->pkt_dir, 2716 pdcp_xform->hfn_threshold, 2717 &cipherdata, &authdata, 2718 0); 2719 else if (session->dir == DIR_DEC) 2720 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 2721 priv->flc_desc[0].desc, 1, swap, 2722 pdcp_xform->hfn, 2723 pdcp_xform->bearer, 2724 pdcp_xform->pkt_dir, 2725 pdcp_xform->hfn_threshold, 2726 &cipherdata, &authdata, 2727 0); 2728 } else { 2729 if (session->dir == DIR_ENC) 2730 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 2731 priv->flc_desc[0].desc, 1, swap, 2732 (enum pdcp_sn_size)pdcp_xform->sn_size, 2733 pdcp_xform->hfn, 2734 pdcp_xform->bearer, 2735 pdcp_xform->pkt_dir, 2736 pdcp_xform->hfn_threshold, 2737 &cipherdata, 0); 2738 else if (session->dir == DIR_DEC) 2739 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 2740 priv->flc_desc[0].desc, 1, swap, 2741 (enum pdcp_sn_size)pdcp_xform->sn_size, 2742 pdcp_xform->hfn, 2743 pdcp_xform->bearer, 2744 pdcp_xform->pkt_dir, 2745 pdcp_xform->hfn_threshold, 2746 &cipherdata, 0); 2747 } 2748 2749 if (bufsize < 0) { 2750 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2751 goto out; 2752 } 2753 2754 /* Enable the stashing control bit */ 2755 DPAA2_SET_FLC_RSC(flc); 2756 flc->word2_rflc_31_0 = lower_32_bits( 2757 (size_t)&(((struct dpaa2_sec_qp *) 2758 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2759 flc->word3_rflc_63_32 = upper_32_bits( 2760 (size_t)&(((struct dpaa2_sec_qp *) 2761 dev->data->queue_pairs[0])->rx_vq)); 2762 2763 flc->word1_sdl = (uint8_t)bufsize; 2764 2765 /* Set EWS bit i.e. enable write-safe */ 2766 DPAA2_SET_FLC_EWS(flc); 2767 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2768 DPAA2_SET_FLC_REUSE_BS(flc); 2769 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2770 DPAA2_SET_FLC_REUSE_FF(flc); 2771 2772 session->ctxt = priv; 2773 2774 return 0; 2775 out: 2776 rte_free(session->auth_key.data); 2777 rte_free(session->cipher_key.data); 2778 rte_free(priv); 2779 return -1; 2780 } 2781 2782 static int 2783 dpaa2_sec_security_session_create(void *dev, 2784 struct rte_security_session_conf *conf, 2785 struct rte_security_session *sess, 2786 struct rte_mempool *mempool) 2787 { 2788 void *sess_private_data; 2789 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2790 int ret; 2791 2792 if (rte_mempool_get(mempool, &sess_private_data)) { 2793 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2794 return -ENOMEM; 2795 } 2796 2797 switch (conf->protocol) { 2798 case RTE_SECURITY_PROTOCOL_IPSEC: 2799 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 2800 sess_private_data); 2801 break; 2802 case RTE_SECURITY_PROTOCOL_MACSEC: 2803 return -ENOTSUP; 2804 case RTE_SECURITY_PROTOCOL_PDCP: 2805 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 2806 sess_private_data); 2807 break; 2808 default: 2809 return -EINVAL; 2810 } 2811 if (ret != 0) { 2812 DPAA2_SEC_ERR("Failed to configure session parameters"); 2813 /* Return session to mempool */ 2814 rte_mempool_put(mempool, sess_private_data); 2815 return ret; 2816 } 2817 2818 set_sec_session_private_data(sess, sess_private_data); 2819 2820 return ret; 2821 } 2822 2823 /** Clear the memory of session so it doesn't leave key material behind */ 2824 static int 2825 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 2826 struct rte_security_session *sess) 2827 { 2828 PMD_INIT_FUNC_TRACE(); 2829 void *sess_priv = get_sec_session_private_data(sess); 2830 2831 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2832 2833 if (sess_priv) { 2834 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2835 2836 rte_free(s->ctxt); 2837 rte_free(s->cipher_key.data); 2838 rte_free(s->auth_key.data); 2839 memset(s, 0, sizeof(dpaa2_sec_session)); 2840 set_sec_session_private_data(sess, NULL); 2841 rte_mempool_put(sess_mp, sess_priv); 2842 } 2843 return 0; 2844 } 2845 2846 static int 2847 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 2848 struct rte_crypto_sym_xform *xform, 2849 struct rte_cryptodev_sym_session *sess, 2850 struct rte_mempool *mempool) 2851 { 2852 void *sess_private_data; 2853 int ret; 2854 2855 if (rte_mempool_get(mempool, &sess_private_data)) { 2856 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2857 return -ENOMEM; 2858 } 2859 2860 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 2861 if (ret != 0) { 2862 DPAA2_SEC_ERR("Failed to configure session parameters"); 2863 /* Return session to mempool */ 2864 rte_mempool_put(mempool, sess_private_data); 2865 return ret; 2866 } 2867 2868 set_sym_session_private_data(sess, dev->driver_id, 2869 sess_private_data); 2870 2871 return 0; 2872 } 2873 2874 /** Clear the memory of session so it doesn't leave key material behind */ 2875 static void 2876 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 2877 struct rte_cryptodev_sym_session *sess) 2878 { 2879 PMD_INIT_FUNC_TRACE(); 2880 uint8_t index = dev->driver_id; 2881 void *sess_priv = get_sym_session_private_data(sess, index); 2882 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2883 2884 if (sess_priv) { 2885 rte_free(s->ctxt); 2886 rte_free(s->cipher_key.data); 2887 rte_free(s->auth_key.data); 2888 memset(s, 0, sizeof(dpaa2_sec_session)); 2889 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2890 set_sym_session_private_data(sess, index, NULL); 2891 rte_mempool_put(sess_mp, sess_priv); 2892 } 2893 } 2894 2895 static int 2896 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 2897 struct rte_cryptodev_config *config __rte_unused) 2898 { 2899 PMD_INIT_FUNC_TRACE(); 2900 2901 return 0; 2902 } 2903 2904 static int 2905 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 2906 { 2907 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2908 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2909 struct dpseci_attr attr; 2910 struct dpaa2_queue *dpaa2_q; 2911 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2912 dev->data->queue_pairs; 2913 struct dpseci_rx_queue_attr rx_attr; 2914 struct dpseci_tx_queue_attr tx_attr; 2915 int ret, i; 2916 2917 PMD_INIT_FUNC_TRACE(); 2918 2919 memset(&attr, 0, sizeof(struct dpseci_attr)); 2920 2921 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 2922 if (ret) { 2923 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 2924 priv->hw_id); 2925 goto get_attr_failure; 2926 } 2927 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 2928 if (ret) { 2929 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 2930 goto get_attr_failure; 2931 } 2932 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 2933 dpaa2_q = &qp[i]->rx_vq; 2934 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2935 &rx_attr); 2936 dpaa2_q->fqid = rx_attr.fqid; 2937 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 2938 } 2939 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 2940 dpaa2_q = &qp[i]->tx_vq; 2941 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2942 &tx_attr); 2943 dpaa2_q->fqid = tx_attr.fqid; 2944 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 2945 } 2946 2947 return 0; 2948 get_attr_failure: 2949 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2950 return -1; 2951 } 2952 2953 static void 2954 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 2955 { 2956 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2957 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2958 int ret; 2959 2960 PMD_INIT_FUNC_TRACE(); 2961 2962 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2963 if (ret) { 2964 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 2965 priv->hw_id); 2966 return; 2967 } 2968 2969 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 2970 if (ret < 0) { 2971 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 2972 return; 2973 } 2974 } 2975 2976 static int 2977 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 2978 { 2979 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2980 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2981 int ret; 2982 2983 PMD_INIT_FUNC_TRACE(); 2984 2985 /* Function is reverse of dpaa2_sec_dev_init. 2986 * It does the following: 2987 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 2988 * 2. Close the DPSECI device 2989 * 3. Free the allocated resources. 2990 */ 2991 2992 /*Close the device at underlying layer*/ 2993 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 2994 if (ret) { 2995 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 2996 return -1; 2997 } 2998 2999 /*Free the allocated memory for ethernet private data and dpseci*/ 3000 priv->hw = NULL; 3001 rte_free(dpseci); 3002 3003 return 0; 3004 } 3005 3006 static void 3007 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3008 struct rte_cryptodev_info *info) 3009 { 3010 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3011 3012 PMD_INIT_FUNC_TRACE(); 3013 if (info != NULL) { 3014 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3015 info->feature_flags = dev->feature_flags; 3016 info->capabilities = dpaa2_sec_capabilities; 3017 /* No limit of number of sessions */ 3018 info->sym.max_nb_sessions = 0; 3019 info->driver_id = cryptodev_driver_id; 3020 } 3021 } 3022 3023 static 3024 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3025 struct rte_cryptodev_stats *stats) 3026 { 3027 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3028 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3029 struct dpseci_sec_counters counters = {0}; 3030 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3031 dev->data->queue_pairs; 3032 int ret, i; 3033 3034 PMD_INIT_FUNC_TRACE(); 3035 if (stats == NULL) { 3036 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3037 return; 3038 } 3039 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3040 if (qp[i] == NULL) { 3041 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3042 continue; 3043 } 3044 3045 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3046 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3047 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3048 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3049 } 3050 3051 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 3052 &counters); 3053 if (ret) { 3054 DPAA2_SEC_ERR("SEC counters failed"); 3055 } else { 3056 DPAA2_SEC_INFO("dpseci hardware stats:" 3057 "\n\tNum of Requests Dequeued = %" PRIu64 3058 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3059 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3060 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3061 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3062 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3063 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3064 counters.dequeued_requests, 3065 counters.ob_enc_requests, 3066 counters.ib_dec_requests, 3067 counters.ob_enc_bytes, 3068 counters.ob_prot_bytes, 3069 counters.ib_dec_bytes, 3070 counters.ib_valid_bytes); 3071 } 3072 } 3073 3074 static 3075 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3076 { 3077 int i; 3078 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3079 (dev->data->queue_pairs); 3080 3081 PMD_INIT_FUNC_TRACE(); 3082 3083 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3084 if (qp[i] == NULL) { 3085 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3086 continue; 3087 } 3088 qp[i]->tx_vq.rx_pkts = 0; 3089 qp[i]->tx_vq.tx_pkts = 0; 3090 qp[i]->tx_vq.err_pkts = 0; 3091 qp[i]->rx_vq.rx_pkts = 0; 3092 qp[i]->rx_vq.tx_pkts = 0; 3093 qp[i]->rx_vq.err_pkts = 0; 3094 } 3095 } 3096 3097 static void __attribute__((hot)) 3098 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3099 const struct qbman_fd *fd, 3100 const struct qbman_result *dq, 3101 struct dpaa2_queue *rxq, 3102 struct rte_event *ev) 3103 { 3104 /* Prefetching mbuf */ 3105 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3106 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3107 3108 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3109 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3110 3111 ev->flow_id = rxq->ev.flow_id; 3112 ev->sub_event_type = rxq->ev.sub_event_type; 3113 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3114 ev->op = RTE_EVENT_OP_NEW; 3115 ev->sched_type = rxq->ev.sched_type; 3116 ev->queue_id = rxq->ev.queue_id; 3117 ev->priority = rxq->ev.priority; 3118 ev->event_ptr = sec_fd_to_mbuf(fd); 3119 3120 qbman_swp_dqrr_consume(swp, dq); 3121 } 3122 static void 3123 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 3124 const struct qbman_fd *fd, 3125 const struct qbman_result *dq, 3126 struct dpaa2_queue *rxq, 3127 struct rte_event *ev) 3128 { 3129 uint8_t dqrr_index; 3130 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3131 /* Prefetching mbuf */ 3132 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3133 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3134 3135 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3136 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3137 3138 ev->flow_id = rxq->ev.flow_id; 3139 ev->sub_event_type = rxq->ev.sub_event_type; 3140 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3141 ev->op = RTE_EVENT_OP_NEW; 3142 ev->sched_type = rxq->ev.sched_type; 3143 ev->queue_id = rxq->ev.queue_id; 3144 ev->priority = rxq->ev.priority; 3145 3146 ev->event_ptr = sec_fd_to_mbuf(fd); 3147 dqrr_index = qbman_get_dqrr_idx(dq); 3148 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3149 DPAA2_PER_LCORE_DQRR_SIZE++; 3150 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3151 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3152 } 3153 3154 int 3155 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3156 int qp_id, 3157 uint16_t dpcon_id, 3158 const struct rte_event *event) 3159 { 3160 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3161 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3162 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3163 struct dpseci_rx_queue_cfg cfg; 3164 int ret; 3165 3166 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3167 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3168 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3169 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3170 else 3171 return -EINVAL; 3172 3173 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3174 cfg.options = DPSECI_QUEUE_OPT_DEST; 3175 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3176 cfg.dest_cfg.dest_id = dpcon_id; 3177 cfg.dest_cfg.priority = event->priority; 3178 3179 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3180 cfg.user_ctx = (size_t)(qp); 3181 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3182 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3183 cfg.order_preservation_en = 1; 3184 } 3185 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3186 qp_id, &cfg); 3187 if (ret) { 3188 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3189 return ret; 3190 } 3191 3192 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3193 3194 return 0; 3195 } 3196 3197 int 3198 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3199 int qp_id) 3200 { 3201 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3202 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3203 struct dpseci_rx_queue_cfg cfg; 3204 int ret; 3205 3206 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3207 cfg.options = DPSECI_QUEUE_OPT_DEST; 3208 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3209 3210 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3211 qp_id, &cfg); 3212 if (ret) 3213 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3214 3215 return ret; 3216 } 3217 3218 static struct rte_cryptodev_ops crypto_ops = { 3219 .dev_configure = dpaa2_sec_dev_configure, 3220 .dev_start = dpaa2_sec_dev_start, 3221 .dev_stop = dpaa2_sec_dev_stop, 3222 .dev_close = dpaa2_sec_dev_close, 3223 .dev_infos_get = dpaa2_sec_dev_infos_get, 3224 .stats_get = dpaa2_sec_stats_get, 3225 .stats_reset = dpaa2_sec_stats_reset, 3226 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3227 .queue_pair_release = dpaa2_sec_queue_pair_release, 3228 .queue_pair_count = dpaa2_sec_queue_pair_count, 3229 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3230 .sym_session_configure = dpaa2_sec_sym_session_configure, 3231 .sym_session_clear = dpaa2_sec_sym_session_clear, 3232 }; 3233 3234 static const struct rte_security_capability * 3235 dpaa2_sec_capabilities_get(void *device __rte_unused) 3236 { 3237 return dpaa2_sec_security_cap; 3238 } 3239 3240 static const struct rte_security_ops dpaa2_sec_security_ops = { 3241 .session_create = dpaa2_sec_security_session_create, 3242 .session_update = NULL, 3243 .session_stats_get = NULL, 3244 .session_destroy = dpaa2_sec_security_session_destroy, 3245 .set_pkt_metadata = NULL, 3246 .capabilities_get = dpaa2_sec_capabilities_get 3247 }; 3248 3249 static int 3250 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3251 { 3252 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3253 3254 rte_free(dev->security_ctx); 3255 3256 rte_mempool_free(internals->fle_pool); 3257 3258 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3259 dev->data->name, rte_socket_id()); 3260 3261 return 0; 3262 } 3263 3264 static int 3265 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3266 { 3267 struct dpaa2_sec_dev_private *internals; 3268 struct rte_device *dev = cryptodev->device; 3269 struct rte_dpaa2_device *dpaa2_dev; 3270 struct rte_security_ctx *security_instance; 3271 struct fsl_mc_io *dpseci; 3272 uint16_t token; 3273 struct dpseci_attr attr; 3274 int retcode, hw_id; 3275 char str[30]; 3276 3277 PMD_INIT_FUNC_TRACE(); 3278 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3279 if (dpaa2_dev == NULL) { 3280 DPAA2_SEC_ERR("DPAA2 SEC device not found"); 3281 return -1; 3282 } 3283 hw_id = dpaa2_dev->object_id; 3284 3285 cryptodev->driver_id = cryptodev_driver_id; 3286 cryptodev->dev_ops = &crypto_ops; 3287 3288 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 3289 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 3290 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3291 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3292 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3293 RTE_CRYPTODEV_FF_SECURITY | 3294 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3295 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3296 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3297 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3298 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3299 3300 internals = cryptodev->data->dev_private; 3301 3302 /* 3303 * For secondary processes, we don't initialise any further as primary 3304 * has already done this work. Only check we don't need a different 3305 * RX function 3306 */ 3307 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3308 DPAA2_SEC_DEBUG("Device already init by primary process"); 3309 return 0; 3310 } 3311 3312 /* Initialize security_ctx only for primary process*/ 3313 security_instance = rte_malloc("rte_security_instances_ops", 3314 sizeof(struct rte_security_ctx), 0); 3315 if (security_instance == NULL) 3316 return -ENOMEM; 3317 security_instance->device = (void *)cryptodev; 3318 security_instance->ops = &dpaa2_sec_security_ops; 3319 security_instance->sess_cnt = 0; 3320 cryptodev->security_ctx = security_instance; 3321 3322 /*Open the rte device via MC and save the handle for further use*/ 3323 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 3324 sizeof(struct fsl_mc_io), 0); 3325 if (!dpseci) { 3326 DPAA2_SEC_ERR( 3327 "Error in allocating the memory for dpsec object"); 3328 return -1; 3329 } 3330 dpseci->regs = rte_mcp_ptr_list[0]; 3331 3332 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 3333 if (retcode != 0) { 3334 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 3335 retcode); 3336 goto init_error; 3337 } 3338 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 3339 if (retcode != 0) { 3340 DPAA2_SEC_ERR( 3341 "Cannot get dpsec device attributed: Error = %x", 3342 retcode); 3343 goto init_error; 3344 } 3345 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name), 3346 "dpsec-%u", hw_id); 3347 3348 internals->max_nb_queue_pairs = attr.num_tx_queues; 3349 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 3350 internals->hw = dpseci; 3351 internals->token = token; 3352 3353 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d", 3354 getpid(), cryptodev->data->dev_id); 3355 internals->fle_pool = rte_mempool_create((const char *)str, 3356 FLE_POOL_NUM_BUFS, 3357 FLE_POOL_BUF_SIZE, 3358 FLE_POOL_CACHE_SIZE, 0, 3359 NULL, NULL, NULL, NULL, 3360 SOCKET_ID_ANY, 0); 3361 if (!internals->fle_pool) { 3362 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 3363 goto init_error; 3364 } 3365 3366 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3367 return 0; 3368 3369 init_error: 3370 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3371 3372 /* dpaa2_sec_uninit(crypto_dev_name); */ 3373 return -EFAULT; 3374 } 3375 3376 static int 3377 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 3378 struct rte_dpaa2_device *dpaa2_dev) 3379 { 3380 struct rte_cryptodev *cryptodev; 3381 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3382 3383 int retval; 3384 3385 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d", 3386 dpaa2_dev->object_id); 3387 3388 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3389 if (cryptodev == NULL) 3390 return -ENOMEM; 3391 3392 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3393 cryptodev->data->dev_private = rte_zmalloc_socket( 3394 "cryptodev private structure", 3395 sizeof(struct dpaa2_sec_dev_private), 3396 RTE_CACHE_LINE_SIZE, 3397 rte_socket_id()); 3398 3399 if (cryptodev->data->dev_private == NULL) 3400 rte_panic("Cannot allocate memzone for private " 3401 "device data"); 3402 } 3403 3404 dpaa2_dev->cryptodev = cryptodev; 3405 cryptodev->device = &dpaa2_dev->device; 3406 3407 /* init user callbacks */ 3408 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3409 3410 /* Invoke PMD device initialization function */ 3411 retval = dpaa2_sec_dev_init(cryptodev); 3412 if (retval == 0) 3413 return 0; 3414 3415 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3416 rte_free(cryptodev->data->dev_private); 3417 3418 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 3419 3420 return -ENXIO; 3421 } 3422 3423 static int 3424 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 3425 { 3426 struct rte_cryptodev *cryptodev; 3427 int ret; 3428 3429 cryptodev = dpaa2_dev->cryptodev; 3430 if (cryptodev == NULL) 3431 return -ENODEV; 3432 3433 ret = dpaa2_sec_uninit(cryptodev); 3434 if (ret) 3435 return ret; 3436 3437 return rte_cryptodev_pmd_destroy(cryptodev); 3438 } 3439 3440 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 3441 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 3442 .drv_type = DPAA2_CRYPTO, 3443 .driver = { 3444 .name = "DPAA2 SEC PMD" 3445 }, 3446 .probe = cryptodev_dpaa2_sec_probe, 3447 .remove = cryptodev_dpaa2_sec_remove, 3448 }; 3449 3450 static struct cryptodev_driver dpaa2_sec_crypto_drv; 3451 3452 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 3453 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 3454 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 3455 3456 RTE_INIT(dpaa2_sec_init_log) 3457 { 3458 /* Bus level logs */ 3459 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); 3460 if (dpaa2_logtype_sec >= 0) 3461 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); 3462 } 3463