1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016-2018 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_cryptodev.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_cycles.h> 17 #include <rte_kvargs.h> 18 #include <rte_dev.h> 19 #include <rte_cryptodev_pmd.h> 20 #include <rte_common.h> 21 #include <rte_fslmc.h> 22 #include <fslmc_vfio.h> 23 #include <dpaa2_hw_pvt.h> 24 #include <dpaa2_hw_dpio.h> 25 #include <dpaa2_hw_mempool.h> 26 #include <fsl_dpopr.h> 27 #include <fsl_dpseci.h> 28 #include <fsl_mc_sys.h> 29 30 #include "dpaa2_sec_priv.h" 31 #include "dpaa2_sec_event.h" 32 #include "dpaa2_sec_logs.h" 33 34 /* Required types */ 35 typedef uint64_t dma_addr_t; 36 37 /* RTA header files */ 38 #include <hw/desc/ipsec.h> 39 #include <hw/desc/pdcp.h> 40 #include <hw/desc/algo.h> 41 42 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 43 * a pointer to the shared descriptor 44 */ 45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 46 #define FSL_VENDOR_ID 0x1957 47 #define FSL_DEVICE_ID 0x410 48 #define FSL_SUBSYSTEM_SEC 1 49 #define FSL_MC_DPSECI_DEVID 3 50 51 #define NO_PREFETCH 0 52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 53 #define FLE_POOL_NUM_BUFS 32000 54 #define FLE_POOL_BUF_SIZE 256 55 #define FLE_POOL_CACHE_SIZE 512 56 #define FLE_SG_MEM_SIZE 2048 57 #define SEC_FLC_DHR_OUTBOUND -114 58 #define SEC_FLC_DHR_INBOUND 0 59 60 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; 61 62 static uint8_t cryptodev_driver_id; 63 64 int dpaa2_logtype_sec; 65 66 static inline int 67 build_proto_compound_fd(dpaa2_sec_session *sess, 68 struct rte_crypto_op *op, 69 struct qbman_fd *fd, uint16_t bpid) 70 { 71 struct rte_crypto_sym_op *sym_op = op->sym; 72 struct ctxt_priv *priv = sess->ctxt; 73 struct qbman_fle *fle, *ip_fle, *op_fle; 74 struct sec_flow_context *flc; 75 struct rte_mbuf *src_mbuf = sym_op->m_src; 76 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 77 int retval; 78 79 if (!dst_mbuf) 80 dst_mbuf = src_mbuf; 81 82 /* Save the shared descriptor */ 83 flc = &priv->flc_desc[0].flc; 84 85 /* we are using the first FLE entry to store Mbuf */ 86 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 87 if (retval) { 88 DPAA2_SEC_ERR("Memory alloc failed"); 89 return -1; 90 } 91 memset(fle, 0, FLE_POOL_BUF_SIZE); 92 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 93 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 94 95 op_fle = fle + 1; 96 ip_fle = fle + 2; 97 98 if (likely(bpid < MAX_BPID)) { 99 DPAA2_SET_FD_BPID(fd, bpid); 100 DPAA2_SET_FLE_BPID(op_fle, bpid); 101 DPAA2_SET_FLE_BPID(ip_fle, bpid); 102 } else { 103 DPAA2_SET_FD_IVP(fd); 104 DPAA2_SET_FLE_IVP(op_fle); 105 DPAA2_SET_FLE_IVP(ip_fle); 106 } 107 108 /* Configure FD as a FRAME LIST */ 109 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 110 DPAA2_SET_FD_COMPOUND_FMT(fd); 111 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); 112 113 /* Configure Output FLE with dst mbuf data */ 114 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 115 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 116 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 117 118 /* Configure Input FLE with src mbuf data */ 119 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 120 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 121 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 122 123 DPAA2_SET_FD_LEN(fd, ip_fle->length); 124 DPAA2_SET_FLE_FIN(ip_fle); 125 126 #ifdef ENABLE_HFN_OVERRIDE 127 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) { 128 /*enable HFN override override */ 129 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd); 130 DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd); 131 DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd); 132 } 133 #endif 134 135 return 0; 136 137 } 138 139 static inline int 140 build_proto_fd(dpaa2_sec_session *sess, 141 struct rte_crypto_op *op, 142 struct qbman_fd *fd, uint16_t bpid) 143 { 144 struct rte_crypto_sym_op *sym_op = op->sym; 145 if (sym_op->m_dst) 146 return build_proto_compound_fd(sess, op, fd, bpid); 147 148 struct ctxt_priv *priv = sess->ctxt; 149 struct sec_flow_context *flc; 150 struct rte_mbuf *mbuf = sym_op->m_src; 151 152 if (likely(bpid < MAX_BPID)) 153 DPAA2_SET_FD_BPID(fd, bpid); 154 else 155 DPAA2_SET_FD_IVP(fd); 156 157 /* Save the shared descriptor */ 158 flc = &priv->flc_desc[0].flc; 159 160 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 161 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 162 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 163 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); 164 165 /* save physical address of mbuf */ 166 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 167 mbuf->buf_iova = (size_t)op; 168 169 return 0; 170 } 171 172 static inline int 173 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 174 struct rte_crypto_op *op, 175 struct qbman_fd *fd, __rte_unused uint16_t bpid) 176 { 177 struct rte_crypto_sym_op *sym_op = op->sym; 178 struct ctxt_priv *priv = sess->ctxt; 179 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 180 struct sec_flow_context *flc; 181 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 182 int icv_len = sess->digest_length; 183 uint8_t *old_icv; 184 struct rte_mbuf *mbuf; 185 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 186 sess->iv.offset); 187 188 PMD_INIT_FUNC_TRACE(); 189 190 if (sym_op->m_dst) 191 mbuf = sym_op->m_dst; 192 else 193 mbuf = sym_op->m_src; 194 195 /* first FLE entry used to store mbuf and session ctxt */ 196 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 197 RTE_CACHE_LINE_SIZE); 198 if (unlikely(!fle)) { 199 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 200 return -1; 201 } 202 memset(fle, 0, FLE_SG_MEM_SIZE); 203 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 204 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 205 206 op_fle = fle + 1; 207 ip_fle = fle + 2; 208 sge = fle + 3; 209 210 /* Save the shared descriptor */ 211 flc = &priv->flc_desc[0].flc; 212 213 /* Configure FD as a FRAME LIST */ 214 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 215 DPAA2_SET_FD_COMPOUND_FMT(fd); 216 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 217 218 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 219 "iv-len=%d data_off: 0x%x\n", 220 sym_op->aead.data.offset, 221 sym_op->aead.data.length, 222 sess->digest_length, 223 sess->iv.length, 224 sym_op->m_src->data_off); 225 226 /* Configure Output FLE with Scatter/Gather Entry */ 227 DPAA2_SET_FLE_SG_EXT(op_fle); 228 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 229 230 if (auth_only_len) 231 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 232 233 op_fle->length = (sess->dir == DIR_ENC) ? 234 (sym_op->aead.data.length + icv_len + auth_only_len) : 235 sym_op->aead.data.length + auth_only_len; 236 237 /* Configure Output SGE for Encap/Decap */ 238 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 239 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset - 240 auth_only_len); 241 sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len; 242 243 mbuf = mbuf->next; 244 /* o/p segs */ 245 while (mbuf) { 246 sge++; 247 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 248 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 249 sge->length = mbuf->data_len; 250 mbuf = mbuf->next; 251 } 252 sge->length -= icv_len; 253 254 if (sess->dir == DIR_ENC) { 255 sge++; 256 DPAA2_SET_FLE_ADDR(sge, 257 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 258 sge->length = icv_len; 259 } 260 DPAA2_SET_FLE_FIN(sge); 261 262 sge++; 263 mbuf = sym_op->m_src; 264 265 /* Configure Input FLE with Scatter/Gather Entry */ 266 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 267 DPAA2_SET_FLE_SG_EXT(ip_fle); 268 DPAA2_SET_FLE_FIN(ip_fle); 269 ip_fle->length = (sess->dir == DIR_ENC) ? 270 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 271 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 272 icv_len); 273 274 /* Configure Input SGE for Encap/Decap */ 275 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 276 sge->length = sess->iv.length; 277 278 sge++; 279 if (auth_only_len) { 280 DPAA2_SET_FLE_ADDR(sge, 281 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 282 sge->length = auth_only_len; 283 sge++; 284 } 285 286 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 287 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 288 mbuf->data_off); 289 sge->length = mbuf->data_len - sym_op->aead.data.offset; 290 291 mbuf = mbuf->next; 292 /* i/p segs */ 293 while (mbuf) { 294 sge++; 295 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 296 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 297 sge->length = mbuf->data_len; 298 mbuf = mbuf->next; 299 } 300 301 if (sess->dir == DIR_DEC) { 302 sge++; 303 old_icv = (uint8_t *)(sge + 1); 304 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 305 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 306 sge->length = icv_len; 307 } 308 309 DPAA2_SET_FLE_FIN(sge); 310 if (auth_only_len) { 311 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 312 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 313 } 314 DPAA2_SET_FD_LEN(fd, ip_fle->length); 315 316 return 0; 317 } 318 319 static inline int 320 build_authenc_gcm_fd(dpaa2_sec_session *sess, 321 struct rte_crypto_op *op, 322 struct qbman_fd *fd, uint16_t bpid) 323 { 324 struct rte_crypto_sym_op *sym_op = op->sym; 325 struct ctxt_priv *priv = sess->ctxt; 326 struct qbman_fle *fle, *sge; 327 struct sec_flow_context *flc; 328 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 329 int icv_len = sess->digest_length, retval; 330 uint8_t *old_icv; 331 struct rte_mbuf *dst; 332 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 333 sess->iv.offset); 334 335 PMD_INIT_FUNC_TRACE(); 336 337 if (sym_op->m_dst) 338 dst = sym_op->m_dst; 339 else 340 dst = sym_op->m_src; 341 342 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 343 * Currently we donot know which FLE has the mbuf stored. 344 * So while retreiving we can go back 1 FLE from the FD -ADDR 345 * to get the MBUF Addr from the previous FLE. 346 * We can have a better approach to use the inline Mbuf 347 */ 348 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 349 if (retval) { 350 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 351 return -1; 352 } 353 memset(fle, 0, FLE_POOL_BUF_SIZE); 354 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 355 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 356 fle = fle + 1; 357 sge = fle + 2; 358 if (likely(bpid < MAX_BPID)) { 359 DPAA2_SET_FD_BPID(fd, bpid); 360 DPAA2_SET_FLE_BPID(fle, bpid); 361 DPAA2_SET_FLE_BPID(fle + 1, bpid); 362 DPAA2_SET_FLE_BPID(sge, bpid); 363 DPAA2_SET_FLE_BPID(sge + 1, bpid); 364 DPAA2_SET_FLE_BPID(sge + 2, bpid); 365 DPAA2_SET_FLE_BPID(sge + 3, bpid); 366 } else { 367 DPAA2_SET_FD_IVP(fd); 368 DPAA2_SET_FLE_IVP(fle); 369 DPAA2_SET_FLE_IVP((fle + 1)); 370 DPAA2_SET_FLE_IVP(sge); 371 DPAA2_SET_FLE_IVP((sge + 1)); 372 DPAA2_SET_FLE_IVP((sge + 2)); 373 DPAA2_SET_FLE_IVP((sge + 3)); 374 } 375 376 /* Save the shared descriptor */ 377 flc = &priv->flc_desc[0].flc; 378 /* Configure FD as a FRAME LIST */ 379 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 380 DPAA2_SET_FD_COMPOUND_FMT(fd); 381 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 382 383 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 384 "iv-len=%d data_off: 0x%x\n", 385 sym_op->aead.data.offset, 386 sym_op->aead.data.length, 387 sess->digest_length, 388 sess->iv.length, 389 sym_op->m_src->data_off); 390 391 /* Configure Output FLE with Scatter/Gather Entry */ 392 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 393 if (auth_only_len) 394 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 395 fle->length = (sess->dir == DIR_ENC) ? 396 (sym_op->aead.data.length + icv_len + auth_only_len) : 397 sym_op->aead.data.length + auth_only_len; 398 399 DPAA2_SET_FLE_SG_EXT(fle); 400 401 /* Configure Output SGE for Encap/Decap */ 402 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 403 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 404 dst->data_off - auth_only_len); 405 sge->length = sym_op->aead.data.length + auth_only_len; 406 407 if (sess->dir == DIR_ENC) { 408 sge++; 409 DPAA2_SET_FLE_ADDR(sge, 410 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 411 sge->length = sess->digest_length; 412 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 413 sess->iv.length + auth_only_len)); 414 } 415 DPAA2_SET_FLE_FIN(sge); 416 417 sge++; 418 fle++; 419 420 /* Configure Input FLE with Scatter/Gather Entry */ 421 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 422 DPAA2_SET_FLE_SG_EXT(fle); 423 DPAA2_SET_FLE_FIN(fle); 424 fle->length = (sess->dir == DIR_ENC) ? 425 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 426 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 427 sess->digest_length); 428 429 /* Configure Input SGE for Encap/Decap */ 430 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 431 sge->length = sess->iv.length; 432 sge++; 433 if (auth_only_len) { 434 DPAA2_SET_FLE_ADDR(sge, 435 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 436 sge->length = auth_only_len; 437 DPAA2_SET_FLE_BPID(sge, bpid); 438 sge++; 439 } 440 441 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 442 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 443 sym_op->m_src->data_off); 444 sge->length = sym_op->aead.data.length; 445 if (sess->dir == DIR_DEC) { 446 sge++; 447 old_icv = (uint8_t *)(sge + 1); 448 memcpy(old_icv, sym_op->aead.digest.data, 449 sess->digest_length); 450 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 451 sge->length = sess->digest_length; 452 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 453 sess->digest_length + 454 sess->iv.length + 455 auth_only_len)); 456 } 457 DPAA2_SET_FLE_FIN(sge); 458 459 if (auth_only_len) { 460 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 461 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 462 } 463 464 return 0; 465 } 466 467 static inline int 468 build_authenc_sg_fd(dpaa2_sec_session *sess, 469 struct rte_crypto_op *op, 470 struct qbman_fd *fd, __rte_unused uint16_t bpid) 471 { 472 struct rte_crypto_sym_op *sym_op = op->sym; 473 struct ctxt_priv *priv = sess->ctxt; 474 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 475 struct sec_flow_context *flc; 476 uint32_t auth_only_len = sym_op->auth.data.length - 477 sym_op->cipher.data.length; 478 int icv_len = sess->digest_length; 479 uint8_t *old_icv; 480 struct rte_mbuf *mbuf; 481 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 482 sess->iv.offset); 483 484 PMD_INIT_FUNC_TRACE(); 485 486 if (sym_op->m_dst) 487 mbuf = sym_op->m_dst; 488 else 489 mbuf = sym_op->m_src; 490 491 /* first FLE entry used to store mbuf and session ctxt */ 492 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 493 RTE_CACHE_LINE_SIZE); 494 if (unlikely(!fle)) { 495 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 496 return -1; 497 } 498 memset(fle, 0, FLE_SG_MEM_SIZE); 499 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 500 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 501 502 op_fle = fle + 1; 503 ip_fle = fle + 2; 504 sge = fle + 3; 505 506 /* Save the shared descriptor */ 507 flc = &priv->flc_desc[0].flc; 508 509 /* Configure FD as a FRAME LIST */ 510 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 511 DPAA2_SET_FD_COMPOUND_FMT(fd); 512 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 513 514 DPAA2_SEC_DP_DEBUG( 515 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 516 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 517 sym_op->auth.data.offset, 518 sym_op->auth.data.length, 519 sess->digest_length, 520 sym_op->cipher.data.offset, 521 sym_op->cipher.data.length, 522 sess->iv.length, 523 sym_op->m_src->data_off); 524 525 /* Configure Output FLE with Scatter/Gather Entry */ 526 DPAA2_SET_FLE_SG_EXT(op_fle); 527 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 528 529 if (auth_only_len) 530 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 531 532 op_fle->length = (sess->dir == DIR_ENC) ? 533 (sym_op->cipher.data.length + icv_len) : 534 sym_op->cipher.data.length; 535 536 /* Configure Output SGE for Encap/Decap */ 537 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 538 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 539 sge->length = mbuf->data_len - sym_op->auth.data.offset; 540 541 mbuf = mbuf->next; 542 /* o/p segs */ 543 while (mbuf) { 544 sge++; 545 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 546 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 547 sge->length = mbuf->data_len; 548 mbuf = mbuf->next; 549 } 550 sge->length -= icv_len; 551 552 if (sess->dir == DIR_ENC) { 553 sge++; 554 DPAA2_SET_FLE_ADDR(sge, 555 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 556 sge->length = icv_len; 557 } 558 DPAA2_SET_FLE_FIN(sge); 559 560 sge++; 561 mbuf = sym_op->m_src; 562 563 /* Configure Input FLE with Scatter/Gather Entry */ 564 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 565 DPAA2_SET_FLE_SG_EXT(ip_fle); 566 DPAA2_SET_FLE_FIN(ip_fle); 567 ip_fle->length = (sess->dir == DIR_ENC) ? 568 (sym_op->auth.data.length + sess->iv.length) : 569 (sym_op->auth.data.length + sess->iv.length + 570 icv_len); 571 572 /* Configure Input SGE for Encap/Decap */ 573 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 574 sge->length = sess->iv.length; 575 576 sge++; 577 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 578 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 579 mbuf->data_off); 580 sge->length = mbuf->data_len - sym_op->auth.data.offset; 581 582 mbuf = mbuf->next; 583 /* i/p segs */ 584 while (mbuf) { 585 sge++; 586 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 587 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 588 sge->length = mbuf->data_len; 589 mbuf = mbuf->next; 590 } 591 sge->length -= icv_len; 592 593 if (sess->dir == DIR_DEC) { 594 sge++; 595 old_icv = (uint8_t *)(sge + 1); 596 memcpy(old_icv, sym_op->auth.digest.data, 597 icv_len); 598 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 599 sge->length = icv_len; 600 } 601 602 DPAA2_SET_FLE_FIN(sge); 603 if (auth_only_len) { 604 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 605 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 606 } 607 DPAA2_SET_FD_LEN(fd, ip_fle->length); 608 609 return 0; 610 } 611 612 static inline int 613 build_authenc_fd(dpaa2_sec_session *sess, 614 struct rte_crypto_op *op, 615 struct qbman_fd *fd, uint16_t bpid) 616 { 617 struct rte_crypto_sym_op *sym_op = op->sym; 618 struct ctxt_priv *priv = sess->ctxt; 619 struct qbman_fle *fle, *sge; 620 struct sec_flow_context *flc; 621 uint32_t auth_only_len = sym_op->auth.data.length - 622 sym_op->cipher.data.length; 623 int icv_len = sess->digest_length, retval; 624 uint8_t *old_icv; 625 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 626 sess->iv.offset); 627 struct rte_mbuf *dst; 628 629 PMD_INIT_FUNC_TRACE(); 630 631 if (sym_op->m_dst) 632 dst = sym_op->m_dst; 633 else 634 dst = sym_op->m_src; 635 636 /* we are using the first FLE entry to store Mbuf. 637 * Currently we donot know which FLE has the mbuf stored. 638 * So while retreiving we can go back 1 FLE from the FD -ADDR 639 * to get the MBUF Addr from the previous FLE. 640 * We can have a better approach to use the inline Mbuf 641 */ 642 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 643 if (retval) { 644 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 645 return -1; 646 } 647 memset(fle, 0, FLE_POOL_BUF_SIZE); 648 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 649 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 650 fle = fle + 1; 651 sge = fle + 2; 652 if (likely(bpid < MAX_BPID)) { 653 DPAA2_SET_FD_BPID(fd, bpid); 654 DPAA2_SET_FLE_BPID(fle, bpid); 655 DPAA2_SET_FLE_BPID(fle + 1, bpid); 656 DPAA2_SET_FLE_BPID(sge, bpid); 657 DPAA2_SET_FLE_BPID(sge + 1, bpid); 658 DPAA2_SET_FLE_BPID(sge + 2, bpid); 659 DPAA2_SET_FLE_BPID(sge + 3, bpid); 660 } else { 661 DPAA2_SET_FD_IVP(fd); 662 DPAA2_SET_FLE_IVP(fle); 663 DPAA2_SET_FLE_IVP((fle + 1)); 664 DPAA2_SET_FLE_IVP(sge); 665 DPAA2_SET_FLE_IVP((sge + 1)); 666 DPAA2_SET_FLE_IVP((sge + 2)); 667 DPAA2_SET_FLE_IVP((sge + 3)); 668 } 669 670 /* Save the shared descriptor */ 671 flc = &priv->flc_desc[0].flc; 672 /* Configure FD as a FRAME LIST */ 673 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 674 DPAA2_SET_FD_COMPOUND_FMT(fd); 675 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 676 677 DPAA2_SEC_DP_DEBUG( 678 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 679 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 680 sym_op->auth.data.offset, 681 sym_op->auth.data.length, 682 sess->digest_length, 683 sym_op->cipher.data.offset, 684 sym_op->cipher.data.length, 685 sess->iv.length, 686 sym_op->m_src->data_off); 687 688 /* Configure Output FLE with Scatter/Gather Entry */ 689 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 690 if (auth_only_len) 691 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 692 fle->length = (sess->dir == DIR_ENC) ? 693 (sym_op->cipher.data.length + icv_len) : 694 sym_op->cipher.data.length; 695 696 DPAA2_SET_FLE_SG_EXT(fle); 697 698 /* Configure Output SGE for Encap/Decap */ 699 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 700 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 701 dst->data_off); 702 sge->length = sym_op->cipher.data.length; 703 704 if (sess->dir == DIR_ENC) { 705 sge++; 706 DPAA2_SET_FLE_ADDR(sge, 707 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 708 sge->length = sess->digest_length; 709 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 710 sess->iv.length)); 711 } 712 DPAA2_SET_FLE_FIN(sge); 713 714 sge++; 715 fle++; 716 717 /* Configure Input FLE with Scatter/Gather Entry */ 718 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 719 DPAA2_SET_FLE_SG_EXT(fle); 720 DPAA2_SET_FLE_FIN(fle); 721 fle->length = (sess->dir == DIR_ENC) ? 722 (sym_op->auth.data.length + sess->iv.length) : 723 (sym_op->auth.data.length + sess->iv.length + 724 sess->digest_length); 725 726 /* Configure Input SGE for Encap/Decap */ 727 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 728 sge->length = sess->iv.length; 729 sge++; 730 731 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 732 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 733 sym_op->m_src->data_off); 734 sge->length = sym_op->auth.data.length; 735 if (sess->dir == DIR_DEC) { 736 sge++; 737 old_icv = (uint8_t *)(sge + 1); 738 memcpy(old_icv, sym_op->auth.digest.data, 739 sess->digest_length); 740 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 741 sge->length = sess->digest_length; 742 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 743 sess->digest_length + 744 sess->iv.length)); 745 } 746 DPAA2_SET_FLE_FIN(sge); 747 if (auth_only_len) { 748 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 749 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 750 } 751 return 0; 752 } 753 754 static inline int build_auth_sg_fd( 755 dpaa2_sec_session *sess, 756 struct rte_crypto_op *op, 757 struct qbman_fd *fd, 758 __rte_unused uint16_t bpid) 759 { 760 struct rte_crypto_sym_op *sym_op = op->sym; 761 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 762 struct sec_flow_context *flc; 763 struct ctxt_priv *priv = sess->ctxt; 764 uint8_t *old_digest; 765 struct rte_mbuf *mbuf; 766 767 PMD_INIT_FUNC_TRACE(); 768 769 mbuf = sym_op->m_src; 770 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 771 RTE_CACHE_LINE_SIZE); 772 if (unlikely(!fle)) { 773 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 774 return -1; 775 } 776 memset(fle, 0, FLE_SG_MEM_SIZE); 777 /* first FLE entry used to store mbuf and session ctxt */ 778 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 779 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 780 op_fle = fle + 1; 781 ip_fle = fle + 2; 782 sge = fle + 3; 783 784 flc = &priv->flc_desc[DESC_INITFINAL].flc; 785 /* sg FD */ 786 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 787 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 788 DPAA2_SET_FD_COMPOUND_FMT(fd); 789 790 /* o/p fle */ 791 DPAA2_SET_FLE_ADDR(op_fle, 792 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 793 op_fle->length = sess->digest_length; 794 795 /* i/p fle */ 796 DPAA2_SET_FLE_SG_EXT(ip_fle); 797 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 798 /* i/p 1st seg */ 799 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 800 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off); 801 sge->length = mbuf->data_len - sym_op->auth.data.offset; 802 803 /* i/p segs */ 804 mbuf = mbuf->next; 805 while (mbuf) { 806 sge++; 807 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 808 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 809 sge->length = mbuf->data_len; 810 mbuf = mbuf->next; 811 } 812 if (sess->dir == DIR_ENC) { 813 /* Digest calculation case */ 814 sge->length -= sess->digest_length; 815 ip_fle->length = sym_op->auth.data.length; 816 } else { 817 /* Digest verification case */ 818 sge++; 819 old_digest = (uint8_t *)(sge + 1); 820 rte_memcpy(old_digest, sym_op->auth.digest.data, 821 sess->digest_length); 822 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 823 sge->length = sess->digest_length; 824 ip_fle->length = sym_op->auth.data.length + 825 sess->digest_length; 826 } 827 DPAA2_SET_FLE_FIN(sge); 828 DPAA2_SET_FLE_FIN(ip_fle); 829 DPAA2_SET_FD_LEN(fd, ip_fle->length); 830 831 return 0; 832 } 833 834 static inline int 835 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 836 struct qbman_fd *fd, uint16_t bpid) 837 { 838 struct rte_crypto_sym_op *sym_op = op->sym; 839 struct qbman_fle *fle, *sge; 840 struct sec_flow_context *flc; 841 struct ctxt_priv *priv = sess->ctxt; 842 uint8_t *old_digest; 843 int retval; 844 845 PMD_INIT_FUNC_TRACE(); 846 847 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 848 if (retval) { 849 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 850 return -1; 851 } 852 memset(fle, 0, FLE_POOL_BUF_SIZE); 853 /* TODO we are using the first FLE entry to store Mbuf. 854 * Currently we donot know which FLE has the mbuf stored. 855 * So while retreiving we can go back 1 FLE from the FD -ADDR 856 * to get the MBUF Addr from the previous FLE. 857 * We can have a better approach to use the inline Mbuf 858 */ 859 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 860 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 861 fle = fle + 1; 862 863 if (likely(bpid < MAX_BPID)) { 864 DPAA2_SET_FD_BPID(fd, bpid); 865 DPAA2_SET_FLE_BPID(fle, bpid); 866 DPAA2_SET_FLE_BPID(fle + 1, bpid); 867 } else { 868 DPAA2_SET_FD_IVP(fd); 869 DPAA2_SET_FLE_IVP(fle); 870 DPAA2_SET_FLE_IVP((fle + 1)); 871 } 872 flc = &priv->flc_desc[DESC_INITFINAL].flc; 873 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 874 875 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 876 fle->length = sess->digest_length; 877 878 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 879 DPAA2_SET_FD_COMPOUND_FMT(fd); 880 fle++; 881 882 if (sess->dir == DIR_ENC) { 883 DPAA2_SET_FLE_ADDR(fle, 884 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 885 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset + 886 sym_op->m_src->data_off); 887 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length); 888 fle->length = sym_op->auth.data.length; 889 } else { 890 sge = fle + 2; 891 DPAA2_SET_FLE_SG_EXT(fle); 892 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 893 894 if (likely(bpid < MAX_BPID)) { 895 DPAA2_SET_FLE_BPID(sge, bpid); 896 DPAA2_SET_FLE_BPID(sge + 1, bpid); 897 } else { 898 DPAA2_SET_FLE_IVP(sge); 899 DPAA2_SET_FLE_IVP((sge + 1)); 900 } 901 DPAA2_SET_FLE_ADDR(sge, 902 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 903 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 904 sym_op->m_src->data_off); 905 906 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length + 907 sess->digest_length); 908 sge->length = sym_op->auth.data.length; 909 sge++; 910 old_digest = (uint8_t *)(sge + 1); 911 rte_memcpy(old_digest, sym_op->auth.digest.data, 912 sess->digest_length); 913 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 914 sge->length = sess->digest_length; 915 fle->length = sym_op->auth.data.length + 916 sess->digest_length; 917 DPAA2_SET_FLE_FIN(sge); 918 } 919 DPAA2_SET_FLE_FIN(fle); 920 921 return 0; 922 } 923 924 static int 925 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 926 struct qbman_fd *fd, __rte_unused uint16_t bpid) 927 { 928 struct rte_crypto_sym_op *sym_op = op->sym; 929 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 930 struct sec_flow_context *flc; 931 struct ctxt_priv *priv = sess->ctxt; 932 struct rte_mbuf *mbuf; 933 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 934 sess->iv.offset); 935 936 PMD_INIT_FUNC_TRACE(); 937 938 if (sym_op->m_dst) 939 mbuf = sym_op->m_dst; 940 else 941 mbuf = sym_op->m_src; 942 943 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 944 RTE_CACHE_LINE_SIZE); 945 if (!fle) { 946 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 947 return -1; 948 } 949 memset(fle, 0, FLE_SG_MEM_SIZE); 950 /* first FLE entry used to store mbuf and session ctxt */ 951 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 952 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 953 954 op_fle = fle + 1; 955 ip_fle = fle + 2; 956 sge = fle + 3; 957 958 flc = &priv->flc_desc[0].flc; 959 960 DPAA2_SEC_DP_DEBUG( 961 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 962 " data_off: 0x%x\n", 963 sym_op->cipher.data.offset, 964 sym_op->cipher.data.length, 965 sess->iv.length, 966 sym_op->m_src->data_off); 967 968 /* o/p fle */ 969 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 970 op_fle->length = sym_op->cipher.data.length; 971 DPAA2_SET_FLE_SG_EXT(op_fle); 972 973 /* o/p 1st seg */ 974 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 975 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off); 976 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 977 978 mbuf = mbuf->next; 979 /* o/p segs */ 980 while (mbuf) { 981 sge++; 982 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 983 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 984 sge->length = mbuf->data_len; 985 mbuf = mbuf->next; 986 } 987 DPAA2_SET_FLE_FIN(sge); 988 989 DPAA2_SEC_DP_DEBUG( 990 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 991 flc, fle, fle->addr_hi, fle->addr_lo, 992 fle->length); 993 994 /* i/p fle */ 995 mbuf = sym_op->m_src; 996 sge++; 997 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 998 ip_fle->length = sess->iv.length + sym_op->cipher.data.length; 999 DPAA2_SET_FLE_SG_EXT(ip_fle); 1000 1001 /* i/p IV */ 1002 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1003 DPAA2_SET_FLE_OFFSET(sge, 0); 1004 sge->length = sess->iv.length; 1005 1006 sge++; 1007 1008 /* i/p 1st seg */ 1009 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1010 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 1011 mbuf->data_off); 1012 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 1013 1014 mbuf = mbuf->next; 1015 /* i/p segs */ 1016 while (mbuf) { 1017 sge++; 1018 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1019 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1020 sge->length = mbuf->data_len; 1021 mbuf = mbuf->next; 1022 } 1023 DPAA2_SET_FLE_FIN(sge); 1024 DPAA2_SET_FLE_FIN(ip_fle); 1025 1026 /* sg fd */ 1027 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1028 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1029 DPAA2_SET_FD_COMPOUND_FMT(fd); 1030 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1031 1032 DPAA2_SEC_DP_DEBUG( 1033 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1034 " off =%d, len =%d\n", 1035 DPAA2_GET_FD_ADDR(fd), 1036 DPAA2_GET_FD_BPID(fd), 1037 rte_dpaa2_bpid_info[bpid].meta_data_size, 1038 DPAA2_GET_FD_OFFSET(fd), 1039 DPAA2_GET_FD_LEN(fd)); 1040 return 0; 1041 } 1042 1043 static int 1044 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1045 struct qbman_fd *fd, uint16_t bpid) 1046 { 1047 struct rte_crypto_sym_op *sym_op = op->sym; 1048 struct qbman_fle *fle, *sge; 1049 int retval; 1050 struct sec_flow_context *flc; 1051 struct ctxt_priv *priv = sess->ctxt; 1052 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1053 sess->iv.offset); 1054 struct rte_mbuf *dst; 1055 1056 PMD_INIT_FUNC_TRACE(); 1057 1058 if (sym_op->m_dst) 1059 dst = sym_op->m_dst; 1060 else 1061 dst = sym_op->m_src; 1062 1063 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1064 if (retval) { 1065 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1066 return -1; 1067 } 1068 memset(fle, 0, FLE_POOL_BUF_SIZE); 1069 /* TODO we are using the first FLE entry to store Mbuf. 1070 * Currently we donot know which FLE has the mbuf stored. 1071 * So while retreiving we can go back 1 FLE from the FD -ADDR 1072 * to get the MBUF Addr from the previous FLE. 1073 * We can have a better approach to use the inline Mbuf 1074 */ 1075 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1076 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1077 fle = fle + 1; 1078 sge = fle + 2; 1079 1080 if (likely(bpid < MAX_BPID)) { 1081 DPAA2_SET_FD_BPID(fd, bpid); 1082 DPAA2_SET_FLE_BPID(fle, bpid); 1083 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1084 DPAA2_SET_FLE_BPID(sge, bpid); 1085 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1086 } else { 1087 DPAA2_SET_FD_IVP(fd); 1088 DPAA2_SET_FLE_IVP(fle); 1089 DPAA2_SET_FLE_IVP((fle + 1)); 1090 DPAA2_SET_FLE_IVP(sge); 1091 DPAA2_SET_FLE_IVP((sge + 1)); 1092 } 1093 1094 flc = &priv->flc_desc[0].flc; 1095 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1096 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length + 1097 sess->iv.length); 1098 DPAA2_SET_FD_COMPOUND_FMT(fd); 1099 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1100 1101 DPAA2_SEC_DP_DEBUG( 1102 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1103 " data_off: 0x%x\n", 1104 sym_op->cipher.data.offset, 1105 sym_op->cipher.data.length, 1106 sess->iv.length, 1107 sym_op->m_src->data_off); 1108 1109 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1110 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + 1111 dst->data_off); 1112 1113 fle->length = sym_op->cipher.data.length + sess->iv.length; 1114 1115 DPAA2_SEC_DP_DEBUG( 1116 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1117 flc, fle, fle->addr_hi, fle->addr_lo, 1118 fle->length); 1119 1120 fle++; 1121 1122 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1123 fle->length = sym_op->cipher.data.length + sess->iv.length; 1124 1125 DPAA2_SET_FLE_SG_EXT(fle); 1126 1127 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1128 sge->length = sess->iv.length; 1129 1130 sge++; 1131 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1132 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 1133 sym_op->m_src->data_off); 1134 1135 sge->length = sym_op->cipher.data.length; 1136 DPAA2_SET_FLE_FIN(sge); 1137 DPAA2_SET_FLE_FIN(fle); 1138 1139 DPAA2_SEC_DP_DEBUG( 1140 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1141 " off =%d, len =%d\n", 1142 DPAA2_GET_FD_ADDR(fd), 1143 DPAA2_GET_FD_BPID(fd), 1144 rte_dpaa2_bpid_info[bpid].meta_data_size, 1145 DPAA2_GET_FD_OFFSET(fd), 1146 DPAA2_GET_FD_LEN(fd)); 1147 1148 return 0; 1149 } 1150 1151 static inline int 1152 build_sec_fd(struct rte_crypto_op *op, 1153 struct qbman_fd *fd, uint16_t bpid) 1154 { 1155 int ret = -1; 1156 dpaa2_sec_session *sess; 1157 1158 PMD_INIT_FUNC_TRACE(); 1159 1160 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1161 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1162 op->sym->session, cryptodev_driver_id); 1163 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1164 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1165 op->sym->sec_session); 1166 else 1167 return -1; 1168 1169 /* Segmented buffer */ 1170 if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) { 1171 switch (sess->ctxt_type) { 1172 case DPAA2_SEC_CIPHER: 1173 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1174 break; 1175 case DPAA2_SEC_AUTH: 1176 ret = build_auth_sg_fd(sess, op, fd, bpid); 1177 break; 1178 case DPAA2_SEC_AEAD: 1179 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1180 break; 1181 case DPAA2_SEC_CIPHER_HASH: 1182 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1183 break; 1184 case DPAA2_SEC_HASH_CIPHER: 1185 default: 1186 DPAA2_SEC_ERR("error: Unsupported session"); 1187 } 1188 } else { 1189 switch (sess->ctxt_type) { 1190 case DPAA2_SEC_CIPHER: 1191 ret = build_cipher_fd(sess, op, fd, bpid); 1192 break; 1193 case DPAA2_SEC_AUTH: 1194 ret = build_auth_fd(sess, op, fd, bpid); 1195 break; 1196 case DPAA2_SEC_AEAD: 1197 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1198 break; 1199 case DPAA2_SEC_CIPHER_HASH: 1200 ret = build_authenc_fd(sess, op, fd, bpid); 1201 break; 1202 case DPAA2_SEC_IPSEC: 1203 ret = build_proto_fd(sess, op, fd, bpid); 1204 break; 1205 case DPAA2_SEC_PDCP: 1206 ret = build_proto_compound_fd(sess, op, fd, bpid); 1207 break; 1208 case DPAA2_SEC_HASH_CIPHER: 1209 default: 1210 DPAA2_SEC_ERR("error: Unsupported session"); 1211 } 1212 } 1213 return ret; 1214 } 1215 1216 static uint16_t 1217 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1218 uint16_t nb_ops) 1219 { 1220 /* Function to transmit the frames to given device and VQ*/ 1221 uint32_t loop; 1222 int32_t ret; 1223 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1224 uint32_t frames_to_send; 1225 struct qbman_eq_desc eqdesc; 1226 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1227 struct qbman_swp *swp; 1228 uint16_t num_tx = 0; 1229 uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1230 /*todo - need to support multiple buffer pools */ 1231 uint16_t bpid; 1232 struct rte_mempool *mb_pool; 1233 1234 if (unlikely(nb_ops == 0)) 1235 return 0; 1236 1237 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1238 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1239 return 0; 1240 } 1241 /*Prepare enqueue descriptor*/ 1242 qbman_eq_desc_clear(&eqdesc); 1243 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1244 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1245 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1246 1247 if (!DPAA2_PER_LCORE_DPIO) { 1248 ret = dpaa2_affine_qbman_swp(); 1249 if (ret) { 1250 DPAA2_SEC_ERR("Failure in affining portal"); 1251 return 0; 1252 } 1253 } 1254 swp = DPAA2_PER_LCORE_PORTAL; 1255 1256 while (nb_ops) { 1257 frames_to_send = (nb_ops > dpaa2_eqcr_size) ? 1258 dpaa2_eqcr_size : nb_ops; 1259 1260 for (loop = 0; loop < frames_to_send; loop++) { 1261 if ((*ops)->sym->m_src->seqn) { 1262 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1; 1263 1264 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index; 1265 DPAA2_PER_LCORE_DQRR_SIZE--; 1266 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1267 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN; 1268 } 1269 1270 /*Clear the unused FD fields before sending*/ 1271 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1272 mb_pool = (*ops)->sym->m_src->pool; 1273 bpid = mempool_to_bpid(mb_pool); 1274 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1275 if (ret) { 1276 DPAA2_SEC_ERR("error: Improper packet contents" 1277 " for crypto operation"); 1278 goto skip_tx; 1279 } 1280 ops++; 1281 } 1282 loop = 0; 1283 while (loop < frames_to_send) { 1284 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 1285 &fd_arr[loop], 1286 &flags[loop], 1287 frames_to_send - loop); 1288 } 1289 1290 num_tx += frames_to_send; 1291 nb_ops -= frames_to_send; 1292 } 1293 skip_tx: 1294 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1295 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1296 return num_tx; 1297 } 1298 1299 static inline struct rte_crypto_op * 1300 sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id) 1301 { 1302 struct rte_crypto_op *op; 1303 uint16_t len = DPAA2_GET_FD_LEN(fd); 1304 uint16_t diff = 0; 1305 dpaa2_sec_session *sess_priv; 1306 1307 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1308 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1309 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1310 1311 diff = len - mbuf->pkt_len; 1312 mbuf->pkt_len += diff; 1313 mbuf->data_len += diff; 1314 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1315 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1316 op->sym->aead.digest.phys_addr = 0L; 1317 1318 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1319 op->sym->sec_session); 1320 if (sess_priv->dir == DIR_ENC) 1321 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1322 else 1323 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1324 1325 return op; 1326 } 1327 1328 static inline struct rte_crypto_op * 1329 sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) 1330 { 1331 struct qbman_fle *fle; 1332 struct rte_crypto_op *op; 1333 struct ctxt_priv *priv; 1334 struct rte_mbuf *dst, *src; 1335 1336 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1337 return sec_simple_fd_to_mbuf(fd, driver_id); 1338 1339 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1340 1341 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1342 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1343 1344 /* we are using the first FLE entry to store Mbuf. 1345 * Currently we donot know which FLE has the mbuf stored. 1346 * So while retreiving we can go back 1 FLE from the FD -ADDR 1347 * to get the MBUF Addr from the previous FLE. 1348 * We can have a better approach to use the inline Mbuf 1349 */ 1350 1351 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1352 /* TODO complete it. */ 1353 DPAA2_SEC_ERR("error: non inline buffer"); 1354 return NULL; 1355 } 1356 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1357 1358 /* Prefeth op */ 1359 src = op->sym->m_src; 1360 rte_prefetch0(src); 1361 1362 if (op->sym->m_dst) { 1363 dst = op->sym->m_dst; 1364 rte_prefetch0(dst); 1365 } else 1366 dst = src; 1367 1368 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1369 dpaa2_sec_session *sess = (dpaa2_sec_session *) 1370 get_sec_session_private_data(op->sym->sec_session); 1371 if (sess->ctxt_type == DPAA2_SEC_IPSEC) { 1372 uint16_t len = DPAA2_GET_FD_LEN(fd); 1373 dst->pkt_len = len; 1374 dst->data_len = len; 1375 } 1376 } 1377 1378 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1379 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1380 (void *)dst, 1381 dst->buf_addr, 1382 DPAA2_GET_FD_ADDR(fd), 1383 DPAA2_GET_FD_BPID(fd), 1384 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1385 DPAA2_GET_FD_OFFSET(fd), 1386 DPAA2_GET_FD_LEN(fd)); 1387 1388 /* free the fle memory */ 1389 if (likely(rte_pktmbuf_is_contiguous(src))) { 1390 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1391 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1392 } else 1393 rte_free((void *)(fle-1)); 1394 1395 return op; 1396 } 1397 1398 static uint16_t 1399 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1400 uint16_t nb_ops) 1401 { 1402 /* Function is responsible to receive frames for a given device and VQ*/ 1403 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1404 struct rte_cryptodev *dev = 1405 (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev); 1406 struct qbman_result *dq_storage; 1407 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1408 int ret, num_rx = 0; 1409 uint8_t is_last = 0, status; 1410 struct qbman_swp *swp; 1411 const struct qbman_fd *fd; 1412 struct qbman_pull_desc pulldesc; 1413 1414 if (!DPAA2_PER_LCORE_DPIO) { 1415 ret = dpaa2_affine_qbman_swp(); 1416 if (ret) { 1417 DPAA2_SEC_ERR("Failure in affining portal"); 1418 return 0; 1419 } 1420 } 1421 swp = DPAA2_PER_LCORE_PORTAL; 1422 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1423 1424 qbman_pull_desc_clear(&pulldesc); 1425 qbman_pull_desc_set_numframes(&pulldesc, 1426 (nb_ops > dpaa2_dqrr_size) ? 1427 dpaa2_dqrr_size : nb_ops); 1428 qbman_pull_desc_set_fq(&pulldesc, fqid); 1429 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1430 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1431 1); 1432 1433 /*Issue a volatile dequeue command. */ 1434 while (1) { 1435 if (qbman_swp_pull(swp, &pulldesc)) { 1436 DPAA2_SEC_WARN( 1437 "SEC VDQ command is not issued : QBMAN busy"); 1438 /* Portal was busy, try again */ 1439 continue; 1440 } 1441 break; 1442 }; 1443 1444 /* Receive the packets till Last Dequeue entry is found with 1445 * respect to the above issues PULL command. 1446 */ 1447 while (!is_last) { 1448 /* Check if the previous issued command is completed. 1449 * Also seems like the SWP is shared between the Ethernet Driver 1450 * and the SEC driver. 1451 */ 1452 while (!qbman_check_command_complete(dq_storage)) 1453 ; 1454 1455 /* Loop until the dq_storage is updated with 1456 * new token by QBMAN 1457 */ 1458 while (!qbman_check_new_result(dq_storage)) 1459 ; 1460 /* Check whether Last Pull command is Expired and 1461 * setting Condition for Loop termination 1462 */ 1463 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1464 is_last = 1; 1465 /* Check for valid frame. */ 1466 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1467 if (unlikely( 1468 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1469 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1470 continue; 1471 } 1472 } 1473 1474 fd = qbman_result_DQ_fd(dq_storage); 1475 ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id); 1476 1477 if (unlikely(fd->simple.frc)) { 1478 /* TODO Parse SEC errors */ 1479 DPAA2_SEC_ERR("SEC returned Error - %x", 1480 fd->simple.frc); 1481 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1482 } else { 1483 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1484 } 1485 1486 num_rx++; 1487 dq_storage++; 1488 } /* End of Packet Rx loop */ 1489 1490 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1491 1492 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1493 /*Return the total number of packets received to DPAA2 app*/ 1494 return num_rx; 1495 } 1496 1497 /** Release queue pair */ 1498 static int 1499 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1500 { 1501 struct dpaa2_sec_qp *qp = 1502 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1503 1504 PMD_INIT_FUNC_TRACE(); 1505 1506 if (qp->rx_vq.q_storage) { 1507 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1508 rte_free(qp->rx_vq.q_storage); 1509 } 1510 rte_free(qp); 1511 1512 dev->data->queue_pairs[queue_pair_id] = NULL; 1513 1514 return 0; 1515 } 1516 1517 /** Setup a queue pair */ 1518 static int 1519 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1520 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1521 __rte_unused int socket_id, 1522 __rte_unused struct rte_mempool *session_pool) 1523 { 1524 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1525 struct dpaa2_sec_qp *qp; 1526 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1527 struct dpseci_rx_queue_cfg cfg; 1528 int32_t retcode; 1529 1530 PMD_INIT_FUNC_TRACE(); 1531 1532 /* If qp is already in use free ring memory and qp metadata. */ 1533 if (dev->data->queue_pairs[qp_id] != NULL) { 1534 DPAA2_SEC_INFO("QP already setup"); 1535 return 0; 1536 } 1537 1538 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1539 dev, qp_id, qp_conf); 1540 1541 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1542 1543 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1544 RTE_CACHE_LINE_SIZE); 1545 if (!qp) { 1546 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1547 return -1; 1548 } 1549 1550 qp->rx_vq.dev = dev; 1551 qp->tx_vq.dev = dev; 1552 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1553 sizeof(struct queue_storage_info_t), 1554 RTE_CACHE_LINE_SIZE); 1555 if (!qp->rx_vq.q_storage) { 1556 DPAA2_SEC_ERR("malloc failed for q_storage"); 1557 return -1; 1558 } 1559 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1560 1561 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1562 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1563 return -1; 1564 } 1565 1566 dev->data->queue_pairs[qp_id] = qp; 1567 1568 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1569 cfg.user_ctx = (size_t)(&qp->rx_vq); 1570 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1571 qp_id, &cfg); 1572 return retcode; 1573 } 1574 1575 /** Return the number of allocated queue pairs */ 1576 static uint32_t 1577 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) 1578 { 1579 PMD_INIT_FUNC_TRACE(); 1580 1581 return dev->data->nb_queue_pairs; 1582 } 1583 1584 /** Returns the size of the aesni gcm session structure */ 1585 static unsigned int 1586 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1587 { 1588 PMD_INIT_FUNC_TRACE(); 1589 1590 return sizeof(dpaa2_sec_session); 1591 } 1592 1593 static int 1594 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1595 struct rte_crypto_sym_xform *xform, 1596 dpaa2_sec_session *session) 1597 { 1598 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1599 struct alginfo cipherdata; 1600 int bufsize, i; 1601 struct ctxt_priv *priv; 1602 struct sec_flow_context *flc; 1603 1604 PMD_INIT_FUNC_TRACE(); 1605 1606 /* For SEC CIPHER only one descriptor is required. */ 1607 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1608 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1609 RTE_CACHE_LINE_SIZE); 1610 if (priv == NULL) { 1611 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1612 return -1; 1613 } 1614 1615 priv->fle_pool = dev_priv->fle_pool; 1616 1617 flc = &priv->flc_desc[0].flc; 1618 1619 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1620 RTE_CACHE_LINE_SIZE); 1621 if (session->cipher_key.data == NULL) { 1622 DPAA2_SEC_ERR("No Memory for cipher key"); 1623 rte_free(priv); 1624 return -1; 1625 } 1626 session->cipher_key.length = xform->cipher.key.length; 1627 1628 memcpy(session->cipher_key.data, xform->cipher.key.data, 1629 xform->cipher.key.length); 1630 cipherdata.key = (size_t)session->cipher_key.data; 1631 cipherdata.keylen = session->cipher_key.length; 1632 cipherdata.key_enc_flags = 0; 1633 cipherdata.key_type = RTA_DATA_IMM; 1634 1635 /* Set IV parameters */ 1636 session->iv.offset = xform->cipher.iv.offset; 1637 session->iv.length = xform->cipher.iv.length; 1638 1639 switch (xform->cipher.algo) { 1640 case RTE_CRYPTO_CIPHER_AES_CBC: 1641 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1642 cipherdata.algmode = OP_ALG_AAI_CBC; 1643 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1644 break; 1645 case RTE_CRYPTO_CIPHER_3DES_CBC: 1646 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1647 cipherdata.algmode = OP_ALG_AAI_CBC; 1648 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1649 break; 1650 case RTE_CRYPTO_CIPHER_AES_CTR: 1651 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1652 cipherdata.algmode = OP_ALG_AAI_CTR; 1653 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1654 break; 1655 case RTE_CRYPTO_CIPHER_3DES_CTR: 1656 case RTE_CRYPTO_CIPHER_AES_ECB: 1657 case RTE_CRYPTO_CIPHER_3DES_ECB: 1658 case RTE_CRYPTO_CIPHER_AES_XTS: 1659 case RTE_CRYPTO_CIPHER_AES_F8: 1660 case RTE_CRYPTO_CIPHER_ARC4: 1661 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1662 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1663 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1664 case RTE_CRYPTO_CIPHER_NULL: 1665 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1666 xform->cipher.algo); 1667 goto error_out; 1668 default: 1669 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1670 xform->cipher.algo); 1671 goto error_out; 1672 } 1673 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1674 DIR_ENC : DIR_DEC; 1675 1676 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1677 &cipherdata, NULL, session->iv.length, 1678 session->dir); 1679 if (bufsize < 0) { 1680 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1681 goto error_out; 1682 } 1683 flc->dhr = 0; 1684 flc->bpv0 = 0x1; 1685 flc->mode_bits = 0x8000; 1686 1687 flc->word1_sdl = (uint8_t)bufsize; 1688 flc->word2_rflc_31_0 = lower_32_bits( 1689 (size_t)&(((struct dpaa2_sec_qp *) 1690 dev->data->queue_pairs[0])->rx_vq)); 1691 flc->word3_rflc_63_32 = upper_32_bits( 1692 (size_t)&(((struct dpaa2_sec_qp *) 1693 dev->data->queue_pairs[0])->rx_vq)); 1694 session->ctxt = priv; 1695 1696 for (i = 0; i < bufsize; i++) 1697 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1698 1699 return 0; 1700 1701 error_out: 1702 rte_free(session->cipher_key.data); 1703 rte_free(priv); 1704 return -1; 1705 } 1706 1707 static int 1708 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1709 struct rte_crypto_sym_xform *xform, 1710 dpaa2_sec_session *session) 1711 { 1712 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1713 struct alginfo authdata; 1714 int bufsize, i; 1715 struct ctxt_priv *priv; 1716 struct sec_flow_context *flc; 1717 1718 PMD_INIT_FUNC_TRACE(); 1719 1720 /* For SEC AUTH three descriptors are required for various stages */ 1721 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1722 sizeof(struct ctxt_priv) + 3 * 1723 sizeof(struct sec_flc_desc), 1724 RTE_CACHE_LINE_SIZE); 1725 if (priv == NULL) { 1726 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1727 return -1; 1728 } 1729 1730 priv->fle_pool = dev_priv->fle_pool; 1731 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1732 1733 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1734 RTE_CACHE_LINE_SIZE); 1735 if (session->auth_key.data == NULL) { 1736 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1737 rte_free(priv); 1738 return -1; 1739 } 1740 session->auth_key.length = xform->auth.key.length; 1741 1742 memcpy(session->auth_key.data, xform->auth.key.data, 1743 xform->auth.key.length); 1744 authdata.key = (size_t)session->auth_key.data; 1745 authdata.keylen = session->auth_key.length; 1746 authdata.key_enc_flags = 0; 1747 authdata.key_type = RTA_DATA_IMM; 1748 1749 session->digest_length = xform->auth.digest_length; 1750 1751 switch (xform->auth.algo) { 1752 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1753 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1754 authdata.algmode = OP_ALG_AAI_HMAC; 1755 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1756 break; 1757 case RTE_CRYPTO_AUTH_MD5_HMAC: 1758 authdata.algtype = OP_ALG_ALGSEL_MD5; 1759 authdata.algmode = OP_ALG_AAI_HMAC; 1760 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1761 break; 1762 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1763 authdata.algtype = OP_ALG_ALGSEL_SHA256; 1764 authdata.algmode = OP_ALG_AAI_HMAC; 1765 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1766 break; 1767 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1768 authdata.algtype = OP_ALG_ALGSEL_SHA384; 1769 authdata.algmode = OP_ALG_AAI_HMAC; 1770 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1771 break; 1772 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1773 authdata.algtype = OP_ALG_ALGSEL_SHA512; 1774 authdata.algmode = OP_ALG_AAI_HMAC; 1775 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1776 break; 1777 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1778 authdata.algtype = OP_ALG_ALGSEL_SHA224; 1779 authdata.algmode = OP_ALG_AAI_HMAC; 1780 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 1781 break; 1782 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1783 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1784 case RTE_CRYPTO_AUTH_NULL: 1785 case RTE_CRYPTO_AUTH_SHA1: 1786 case RTE_CRYPTO_AUTH_SHA256: 1787 case RTE_CRYPTO_AUTH_SHA512: 1788 case RTE_CRYPTO_AUTH_SHA224: 1789 case RTE_CRYPTO_AUTH_SHA384: 1790 case RTE_CRYPTO_AUTH_MD5: 1791 case RTE_CRYPTO_AUTH_AES_GMAC: 1792 case RTE_CRYPTO_AUTH_KASUMI_F9: 1793 case RTE_CRYPTO_AUTH_AES_CMAC: 1794 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1795 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1796 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 1797 xform->auth.algo); 1798 goto error_out; 1799 default: 1800 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 1801 xform->auth.algo); 1802 goto error_out; 1803 } 1804 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1805 DIR_ENC : DIR_DEC; 1806 1807 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1808 1, 0, &authdata, !session->dir, 1809 session->digest_length); 1810 if (bufsize < 0) { 1811 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1812 goto error_out; 1813 } 1814 1815 flc->word1_sdl = (uint8_t)bufsize; 1816 flc->word2_rflc_31_0 = lower_32_bits( 1817 (size_t)&(((struct dpaa2_sec_qp *) 1818 dev->data->queue_pairs[0])->rx_vq)); 1819 flc->word3_rflc_63_32 = upper_32_bits( 1820 (size_t)&(((struct dpaa2_sec_qp *) 1821 dev->data->queue_pairs[0])->rx_vq)); 1822 session->ctxt = priv; 1823 for (i = 0; i < bufsize; i++) 1824 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 1825 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 1826 1827 1828 return 0; 1829 1830 error_out: 1831 rte_free(session->auth_key.data); 1832 rte_free(priv); 1833 return -1; 1834 } 1835 1836 static int 1837 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 1838 struct rte_crypto_sym_xform *xform, 1839 dpaa2_sec_session *session) 1840 { 1841 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1842 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1843 struct alginfo aeaddata; 1844 int bufsize, i; 1845 struct ctxt_priv *priv; 1846 struct sec_flow_context *flc; 1847 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 1848 int err; 1849 1850 PMD_INIT_FUNC_TRACE(); 1851 1852 /* Set IV parameters */ 1853 session->iv.offset = aead_xform->iv.offset; 1854 session->iv.length = aead_xform->iv.length; 1855 session->ctxt_type = DPAA2_SEC_AEAD; 1856 1857 /* For SEC AEAD only one descriptor is required */ 1858 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1859 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1860 RTE_CACHE_LINE_SIZE); 1861 if (priv == NULL) { 1862 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1863 return -1; 1864 } 1865 1866 priv->fle_pool = dev_priv->fle_pool; 1867 flc = &priv->flc_desc[0].flc; 1868 1869 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 1870 RTE_CACHE_LINE_SIZE); 1871 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 1872 DPAA2_SEC_ERR("No Memory for aead key"); 1873 rte_free(priv); 1874 return -1; 1875 } 1876 memcpy(session->aead_key.data, aead_xform->key.data, 1877 aead_xform->key.length); 1878 1879 session->digest_length = aead_xform->digest_length; 1880 session->aead_key.length = aead_xform->key.length; 1881 ctxt->auth_only_len = aead_xform->aad_length; 1882 1883 aeaddata.key = (size_t)session->aead_key.data; 1884 aeaddata.keylen = session->aead_key.length; 1885 aeaddata.key_enc_flags = 0; 1886 aeaddata.key_type = RTA_DATA_IMM; 1887 1888 switch (aead_xform->algo) { 1889 case RTE_CRYPTO_AEAD_AES_GCM: 1890 aeaddata.algtype = OP_ALG_ALGSEL_AES; 1891 aeaddata.algmode = OP_ALG_AAI_GCM; 1892 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 1893 break; 1894 case RTE_CRYPTO_AEAD_AES_CCM: 1895 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 1896 aead_xform->algo); 1897 goto error_out; 1898 default: 1899 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 1900 aead_xform->algo); 1901 goto error_out; 1902 } 1903 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1904 DIR_ENC : DIR_DEC; 1905 1906 priv->flc_desc[0].desc[0] = aeaddata.keylen; 1907 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1908 MIN_JOB_DESC_SIZE, 1909 (unsigned int *)priv->flc_desc[0].desc, 1910 &priv->flc_desc[0].desc[1], 1); 1911 1912 if (err < 0) { 1913 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 1914 goto error_out; 1915 } 1916 if (priv->flc_desc[0].desc[1] & 1) { 1917 aeaddata.key_type = RTA_DATA_IMM; 1918 } else { 1919 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 1920 aeaddata.key_type = RTA_DATA_PTR; 1921 } 1922 priv->flc_desc[0].desc[0] = 0; 1923 priv->flc_desc[0].desc[1] = 0; 1924 1925 if (session->dir == DIR_ENC) 1926 bufsize = cnstr_shdsc_gcm_encap( 1927 priv->flc_desc[0].desc, 1, 0, 1928 &aeaddata, session->iv.length, 1929 session->digest_length); 1930 else 1931 bufsize = cnstr_shdsc_gcm_decap( 1932 priv->flc_desc[0].desc, 1, 0, 1933 &aeaddata, session->iv.length, 1934 session->digest_length); 1935 if (bufsize < 0) { 1936 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1937 goto error_out; 1938 } 1939 1940 flc->word1_sdl = (uint8_t)bufsize; 1941 flc->word2_rflc_31_0 = lower_32_bits( 1942 (size_t)&(((struct dpaa2_sec_qp *) 1943 dev->data->queue_pairs[0])->rx_vq)); 1944 flc->word3_rflc_63_32 = upper_32_bits( 1945 (size_t)&(((struct dpaa2_sec_qp *) 1946 dev->data->queue_pairs[0])->rx_vq)); 1947 session->ctxt = priv; 1948 for (i = 0; i < bufsize; i++) 1949 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 1950 i, priv->flc_desc[0].desc[i]); 1951 1952 return 0; 1953 1954 error_out: 1955 rte_free(session->aead_key.data); 1956 rte_free(priv); 1957 return -1; 1958 } 1959 1960 1961 static int 1962 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 1963 struct rte_crypto_sym_xform *xform, 1964 dpaa2_sec_session *session) 1965 { 1966 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1967 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1968 struct alginfo authdata, cipherdata; 1969 int bufsize, i; 1970 struct ctxt_priv *priv; 1971 struct sec_flow_context *flc; 1972 struct rte_crypto_cipher_xform *cipher_xform; 1973 struct rte_crypto_auth_xform *auth_xform; 1974 int err; 1975 1976 PMD_INIT_FUNC_TRACE(); 1977 1978 if (session->ext_params.aead_ctxt.auth_cipher_text) { 1979 cipher_xform = &xform->cipher; 1980 auth_xform = &xform->next->auth; 1981 session->ctxt_type = 1982 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1983 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 1984 } else { 1985 cipher_xform = &xform->next->cipher; 1986 auth_xform = &xform->auth; 1987 session->ctxt_type = 1988 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1989 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 1990 } 1991 1992 /* Set IV parameters */ 1993 session->iv.offset = cipher_xform->iv.offset; 1994 session->iv.length = cipher_xform->iv.length; 1995 1996 /* For SEC AEAD only one descriptor is required */ 1997 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1998 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1999 RTE_CACHE_LINE_SIZE); 2000 if (priv == NULL) { 2001 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2002 return -1; 2003 } 2004 2005 priv->fle_pool = dev_priv->fle_pool; 2006 flc = &priv->flc_desc[0].flc; 2007 2008 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2009 RTE_CACHE_LINE_SIZE); 2010 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2011 DPAA2_SEC_ERR("No Memory for cipher key"); 2012 rte_free(priv); 2013 return -1; 2014 } 2015 session->cipher_key.length = cipher_xform->key.length; 2016 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2017 RTE_CACHE_LINE_SIZE); 2018 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2019 DPAA2_SEC_ERR("No Memory for auth key"); 2020 rte_free(session->cipher_key.data); 2021 rte_free(priv); 2022 return -1; 2023 } 2024 session->auth_key.length = auth_xform->key.length; 2025 memcpy(session->cipher_key.data, cipher_xform->key.data, 2026 cipher_xform->key.length); 2027 memcpy(session->auth_key.data, auth_xform->key.data, 2028 auth_xform->key.length); 2029 2030 authdata.key = (size_t)session->auth_key.data; 2031 authdata.keylen = session->auth_key.length; 2032 authdata.key_enc_flags = 0; 2033 authdata.key_type = RTA_DATA_IMM; 2034 2035 session->digest_length = auth_xform->digest_length; 2036 2037 switch (auth_xform->algo) { 2038 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2039 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2040 authdata.algmode = OP_ALG_AAI_HMAC; 2041 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2042 break; 2043 case RTE_CRYPTO_AUTH_MD5_HMAC: 2044 authdata.algtype = OP_ALG_ALGSEL_MD5; 2045 authdata.algmode = OP_ALG_AAI_HMAC; 2046 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2047 break; 2048 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2049 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2050 authdata.algmode = OP_ALG_AAI_HMAC; 2051 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2052 break; 2053 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2054 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2055 authdata.algmode = OP_ALG_AAI_HMAC; 2056 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2057 break; 2058 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2059 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2060 authdata.algmode = OP_ALG_AAI_HMAC; 2061 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2062 break; 2063 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2064 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2065 authdata.algmode = OP_ALG_AAI_HMAC; 2066 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2067 break; 2068 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2069 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2070 case RTE_CRYPTO_AUTH_NULL: 2071 case RTE_CRYPTO_AUTH_SHA1: 2072 case RTE_CRYPTO_AUTH_SHA256: 2073 case RTE_CRYPTO_AUTH_SHA512: 2074 case RTE_CRYPTO_AUTH_SHA224: 2075 case RTE_CRYPTO_AUTH_SHA384: 2076 case RTE_CRYPTO_AUTH_MD5: 2077 case RTE_CRYPTO_AUTH_AES_GMAC: 2078 case RTE_CRYPTO_AUTH_KASUMI_F9: 2079 case RTE_CRYPTO_AUTH_AES_CMAC: 2080 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2081 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2082 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2083 auth_xform->algo); 2084 goto error_out; 2085 default: 2086 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2087 auth_xform->algo); 2088 goto error_out; 2089 } 2090 cipherdata.key = (size_t)session->cipher_key.data; 2091 cipherdata.keylen = session->cipher_key.length; 2092 cipherdata.key_enc_flags = 0; 2093 cipherdata.key_type = RTA_DATA_IMM; 2094 2095 switch (cipher_xform->algo) { 2096 case RTE_CRYPTO_CIPHER_AES_CBC: 2097 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2098 cipherdata.algmode = OP_ALG_AAI_CBC; 2099 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2100 break; 2101 case RTE_CRYPTO_CIPHER_3DES_CBC: 2102 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2103 cipherdata.algmode = OP_ALG_AAI_CBC; 2104 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2105 break; 2106 case RTE_CRYPTO_CIPHER_AES_CTR: 2107 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2108 cipherdata.algmode = OP_ALG_AAI_CTR; 2109 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2110 break; 2111 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2112 case RTE_CRYPTO_CIPHER_NULL: 2113 case RTE_CRYPTO_CIPHER_3DES_ECB: 2114 case RTE_CRYPTO_CIPHER_AES_ECB: 2115 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2116 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2117 cipher_xform->algo); 2118 goto error_out; 2119 default: 2120 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2121 cipher_xform->algo); 2122 goto error_out; 2123 } 2124 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2125 DIR_ENC : DIR_DEC; 2126 2127 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2128 priv->flc_desc[0].desc[1] = authdata.keylen; 2129 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2130 MIN_JOB_DESC_SIZE, 2131 (unsigned int *)priv->flc_desc[0].desc, 2132 &priv->flc_desc[0].desc[2], 2); 2133 2134 if (err < 0) { 2135 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2136 goto error_out; 2137 } 2138 if (priv->flc_desc[0].desc[2] & 1) { 2139 cipherdata.key_type = RTA_DATA_IMM; 2140 } else { 2141 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2142 cipherdata.key_type = RTA_DATA_PTR; 2143 } 2144 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2145 authdata.key_type = RTA_DATA_IMM; 2146 } else { 2147 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2148 authdata.key_type = RTA_DATA_PTR; 2149 } 2150 priv->flc_desc[0].desc[0] = 0; 2151 priv->flc_desc[0].desc[1] = 0; 2152 priv->flc_desc[0].desc[2] = 0; 2153 2154 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2155 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2156 0, &cipherdata, &authdata, 2157 session->iv.length, 2158 ctxt->auth_only_len, 2159 session->digest_length, 2160 session->dir); 2161 if (bufsize < 0) { 2162 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2163 goto error_out; 2164 } 2165 } else { 2166 DPAA2_SEC_ERR("Hash before cipher not supported"); 2167 goto error_out; 2168 } 2169 2170 flc->word1_sdl = (uint8_t)bufsize; 2171 flc->word2_rflc_31_0 = lower_32_bits( 2172 (size_t)&(((struct dpaa2_sec_qp *) 2173 dev->data->queue_pairs[0])->rx_vq)); 2174 flc->word3_rflc_63_32 = upper_32_bits( 2175 (size_t)&(((struct dpaa2_sec_qp *) 2176 dev->data->queue_pairs[0])->rx_vq)); 2177 session->ctxt = priv; 2178 for (i = 0; i < bufsize; i++) 2179 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2180 i, priv->flc_desc[0].desc[i]); 2181 2182 return 0; 2183 2184 error_out: 2185 rte_free(session->cipher_key.data); 2186 rte_free(session->auth_key.data); 2187 rte_free(priv); 2188 return -1; 2189 } 2190 2191 static int 2192 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2193 struct rte_crypto_sym_xform *xform, void *sess) 2194 { 2195 dpaa2_sec_session *session = sess; 2196 2197 PMD_INIT_FUNC_TRACE(); 2198 2199 if (unlikely(sess == NULL)) { 2200 DPAA2_SEC_ERR("Invalid session struct"); 2201 return -1; 2202 } 2203 2204 memset(session, 0, sizeof(dpaa2_sec_session)); 2205 /* Default IV length = 0 */ 2206 session->iv.length = 0; 2207 2208 /* Cipher Only */ 2209 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2210 session->ctxt_type = DPAA2_SEC_CIPHER; 2211 dpaa2_sec_cipher_init(dev, xform, session); 2212 2213 /* Authentication Only */ 2214 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2215 xform->next == NULL) { 2216 session->ctxt_type = DPAA2_SEC_AUTH; 2217 dpaa2_sec_auth_init(dev, xform, session); 2218 2219 /* Cipher then Authenticate */ 2220 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2221 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2222 session->ext_params.aead_ctxt.auth_cipher_text = true; 2223 dpaa2_sec_aead_chain_init(dev, xform, session); 2224 2225 /* Authenticate then Cipher */ 2226 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2227 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2228 session->ext_params.aead_ctxt.auth_cipher_text = false; 2229 dpaa2_sec_aead_chain_init(dev, xform, session); 2230 2231 /* AEAD operation for AES-GCM kind of Algorithms */ 2232 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2233 xform->next == NULL) { 2234 dpaa2_sec_aead_init(dev, xform, session); 2235 2236 } else { 2237 DPAA2_SEC_ERR("Invalid crypto type"); 2238 return -EINVAL; 2239 } 2240 2241 return 0; 2242 } 2243 2244 static int 2245 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2246 dpaa2_sec_session *session, 2247 struct alginfo *aeaddata) 2248 { 2249 PMD_INIT_FUNC_TRACE(); 2250 2251 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2252 RTE_CACHE_LINE_SIZE); 2253 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2254 DPAA2_SEC_ERR("No Memory for aead key"); 2255 return -1; 2256 } 2257 memcpy(session->aead_key.data, aead_xform->key.data, 2258 aead_xform->key.length); 2259 2260 session->digest_length = aead_xform->digest_length; 2261 session->aead_key.length = aead_xform->key.length; 2262 2263 aeaddata->key = (size_t)session->aead_key.data; 2264 aeaddata->keylen = session->aead_key.length; 2265 aeaddata->key_enc_flags = 0; 2266 aeaddata->key_type = RTA_DATA_IMM; 2267 2268 switch (aead_xform->algo) { 2269 case RTE_CRYPTO_AEAD_AES_GCM: 2270 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2271 aeaddata->algmode = OP_ALG_AAI_GCM; 2272 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2273 break; 2274 case RTE_CRYPTO_AEAD_AES_CCM: 2275 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2276 aeaddata->algmode = OP_ALG_AAI_CCM; 2277 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2278 break; 2279 default: 2280 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2281 aead_xform->algo); 2282 return -1; 2283 } 2284 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2285 DIR_ENC : DIR_DEC; 2286 2287 return 0; 2288 } 2289 2290 static int 2291 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2292 struct rte_crypto_auth_xform *auth_xform, 2293 dpaa2_sec_session *session, 2294 struct alginfo *cipherdata, 2295 struct alginfo *authdata) 2296 { 2297 if (cipher_xform) { 2298 session->cipher_key.data = rte_zmalloc(NULL, 2299 cipher_xform->key.length, 2300 RTE_CACHE_LINE_SIZE); 2301 if (session->cipher_key.data == NULL && 2302 cipher_xform->key.length > 0) { 2303 DPAA2_SEC_ERR("No Memory for cipher key"); 2304 return -ENOMEM; 2305 } 2306 2307 session->cipher_key.length = cipher_xform->key.length; 2308 memcpy(session->cipher_key.data, cipher_xform->key.data, 2309 cipher_xform->key.length); 2310 session->cipher_alg = cipher_xform->algo; 2311 } else { 2312 session->cipher_key.data = NULL; 2313 session->cipher_key.length = 0; 2314 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2315 } 2316 2317 if (auth_xform) { 2318 session->auth_key.data = rte_zmalloc(NULL, 2319 auth_xform->key.length, 2320 RTE_CACHE_LINE_SIZE); 2321 if (session->auth_key.data == NULL && 2322 auth_xform->key.length > 0) { 2323 DPAA2_SEC_ERR("No Memory for auth key"); 2324 return -ENOMEM; 2325 } 2326 session->auth_key.length = auth_xform->key.length; 2327 memcpy(session->auth_key.data, auth_xform->key.data, 2328 auth_xform->key.length); 2329 session->auth_alg = auth_xform->algo; 2330 } else { 2331 session->auth_key.data = NULL; 2332 session->auth_key.length = 0; 2333 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2334 } 2335 2336 authdata->key = (size_t)session->auth_key.data; 2337 authdata->keylen = session->auth_key.length; 2338 authdata->key_enc_flags = 0; 2339 authdata->key_type = RTA_DATA_IMM; 2340 switch (session->auth_alg) { 2341 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2342 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2343 authdata->algmode = OP_ALG_AAI_HMAC; 2344 break; 2345 case RTE_CRYPTO_AUTH_MD5_HMAC: 2346 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2347 authdata->algmode = OP_ALG_AAI_HMAC; 2348 break; 2349 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2350 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2351 authdata->algmode = OP_ALG_AAI_HMAC; 2352 break; 2353 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2354 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2355 authdata->algmode = OP_ALG_AAI_HMAC; 2356 break; 2357 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2358 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2359 authdata->algmode = OP_ALG_AAI_HMAC; 2360 break; 2361 case RTE_CRYPTO_AUTH_AES_CMAC: 2362 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2363 break; 2364 case RTE_CRYPTO_AUTH_NULL: 2365 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2366 break; 2367 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2368 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2369 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2370 case RTE_CRYPTO_AUTH_SHA1: 2371 case RTE_CRYPTO_AUTH_SHA256: 2372 case RTE_CRYPTO_AUTH_SHA512: 2373 case RTE_CRYPTO_AUTH_SHA224: 2374 case RTE_CRYPTO_AUTH_SHA384: 2375 case RTE_CRYPTO_AUTH_MD5: 2376 case RTE_CRYPTO_AUTH_AES_GMAC: 2377 case RTE_CRYPTO_AUTH_KASUMI_F9: 2378 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2379 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2380 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2381 session->auth_alg); 2382 return -1; 2383 default: 2384 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2385 session->auth_alg); 2386 return -1; 2387 } 2388 cipherdata->key = (size_t)session->cipher_key.data; 2389 cipherdata->keylen = session->cipher_key.length; 2390 cipherdata->key_enc_flags = 0; 2391 cipherdata->key_type = RTA_DATA_IMM; 2392 2393 switch (session->cipher_alg) { 2394 case RTE_CRYPTO_CIPHER_AES_CBC: 2395 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2396 cipherdata->algmode = OP_ALG_AAI_CBC; 2397 break; 2398 case RTE_CRYPTO_CIPHER_3DES_CBC: 2399 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2400 cipherdata->algmode = OP_ALG_AAI_CBC; 2401 break; 2402 case RTE_CRYPTO_CIPHER_AES_CTR: 2403 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2404 cipherdata->algmode = OP_ALG_AAI_CTR; 2405 break; 2406 case RTE_CRYPTO_CIPHER_NULL: 2407 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2408 break; 2409 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2410 case RTE_CRYPTO_CIPHER_3DES_ECB: 2411 case RTE_CRYPTO_CIPHER_AES_ECB: 2412 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2413 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2414 session->cipher_alg); 2415 return -1; 2416 default: 2417 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2418 session->cipher_alg); 2419 return -1; 2420 } 2421 2422 return 0; 2423 } 2424 2425 #ifdef RTE_LIBRTE_SECURITY_TEST 2426 static uint8_t aes_cbc_iv[] = { 2427 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2428 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2429 #endif 2430 2431 static int 2432 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2433 struct rte_security_session_conf *conf, 2434 void *sess) 2435 { 2436 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2437 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2438 struct rte_crypto_auth_xform *auth_xform = NULL; 2439 struct rte_crypto_aead_xform *aead_xform = NULL; 2440 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2441 struct ctxt_priv *priv; 2442 struct ipsec_encap_pdb encap_pdb; 2443 struct ipsec_decap_pdb decap_pdb; 2444 struct alginfo authdata, cipherdata; 2445 int bufsize; 2446 struct sec_flow_context *flc; 2447 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2448 int ret = -1; 2449 2450 PMD_INIT_FUNC_TRACE(); 2451 2452 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2453 sizeof(struct ctxt_priv) + 2454 sizeof(struct sec_flc_desc), 2455 RTE_CACHE_LINE_SIZE); 2456 2457 if (priv == NULL) { 2458 DPAA2_SEC_ERR("No memory for priv CTXT"); 2459 return -ENOMEM; 2460 } 2461 2462 priv->fle_pool = dev_priv->fle_pool; 2463 flc = &priv->flc_desc[0].flc; 2464 2465 memset(session, 0, sizeof(dpaa2_sec_session)); 2466 2467 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2468 cipher_xform = &conf->crypto_xform->cipher; 2469 if (conf->crypto_xform->next) 2470 auth_xform = &conf->crypto_xform->next->auth; 2471 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2472 session, &cipherdata, &authdata); 2473 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2474 auth_xform = &conf->crypto_xform->auth; 2475 if (conf->crypto_xform->next) 2476 cipher_xform = &conf->crypto_xform->next->cipher; 2477 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2478 session, &cipherdata, &authdata); 2479 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2480 aead_xform = &conf->crypto_xform->aead; 2481 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2482 session, &cipherdata); 2483 } else { 2484 DPAA2_SEC_ERR("XFORM not specified"); 2485 ret = -EINVAL; 2486 goto out; 2487 } 2488 if (ret) { 2489 DPAA2_SEC_ERR("Failed to process xform"); 2490 goto out; 2491 } 2492 2493 session->ctxt_type = DPAA2_SEC_IPSEC; 2494 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2495 struct ip ip4_hdr; 2496 2497 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2498 ip4_hdr.ip_v = IPVERSION; 2499 ip4_hdr.ip_hl = 5; 2500 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2501 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2502 ip4_hdr.ip_id = 0; 2503 ip4_hdr.ip_off = 0; 2504 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2505 ip4_hdr.ip_p = IPPROTO_ESP; 2506 ip4_hdr.ip_sum = 0; 2507 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2508 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2509 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr, 2510 sizeof(struct ip)); 2511 2512 /* For Sec Proto only one descriptor is required. */ 2513 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2514 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2515 PDBOPTS_ESP_OIHI_PDB_INL | 2516 PDBOPTS_ESP_IVSRC | 2517 PDBHMO_ESP_ENCAP_DTTL | 2518 PDBHMO_ESP_SNR; 2519 encap_pdb.spi = ipsec_xform->spi; 2520 encap_pdb.ip_hdr_len = sizeof(struct ip); 2521 2522 session->dir = DIR_ENC; 2523 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2524 1, 0, SHR_SERIAL, &encap_pdb, 2525 (uint8_t *)&ip4_hdr, 2526 &cipherdata, &authdata); 2527 } else if (ipsec_xform->direction == 2528 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2529 flc->dhr = SEC_FLC_DHR_INBOUND; 2530 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2531 decap_pdb.options = sizeof(struct ip) << 16; 2532 session->dir = DIR_DEC; 2533 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2534 1, 0, SHR_SERIAL, 2535 &decap_pdb, &cipherdata, &authdata); 2536 } else 2537 goto out; 2538 2539 if (bufsize < 0) { 2540 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2541 goto out; 2542 } 2543 2544 flc->word1_sdl = (uint8_t)bufsize; 2545 2546 /* Enable the stashing control bit */ 2547 DPAA2_SET_FLC_RSC(flc); 2548 flc->word2_rflc_31_0 = lower_32_bits( 2549 (size_t)&(((struct dpaa2_sec_qp *) 2550 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2551 flc->word3_rflc_63_32 = upper_32_bits( 2552 (size_t)&(((struct dpaa2_sec_qp *) 2553 dev->data->queue_pairs[0])->rx_vq)); 2554 2555 /* Set EWS bit i.e. enable write-safe */ 2556 DPAA2_SET_FLC_EWS(flc); 2557 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2558 DPAA2_SET_FLC_REUSE_BS(flc); 2559 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2560 DPAA2_SET_FLC_REUSE_FF(flc); 2561 2562 session->ctxt = priv; 2563 2564 return 0; 2565 out: 2566 rte_free(session->auth_key.data); 2567 rte_free(session->cipher_key.data); 2568 rte_free(priv); 2569 return ret; 2570 } 2571 2572 static int 2573 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2574 struct rte_security_session_conf *conf, 2575 void *sess) 2576 { 2577 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2578 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2579 struct rte_crypto_auth_xform *auth_xform = NULL; 2580 struct rte_crypto_cipher_xform *cipher_xform; 2581 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2582 struct ctxt_priv *priv; 2583 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2584 struct alginfo authdata, cipherdata; 2585 int bufsize = -1; 2586 struct sec_flow_context *flc; 2587 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2588 int swap = true; 2589 #else 2590 int swap = false; 2591 #endif 2592 2593 PMD_INIT_FUNC_TRACE(); 2594 2595 memset(session, 0, sizeof(dpaa2_sec_session)); 2596 2597 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2598 sizeof(struct ctxt_priv) + 2599 sizeof(struct sec_flc_desc), 2600 RTE_CACHE_LINE_SIZE); 2601 2602 if (priv == NULL) { 2603 DPAA2_SEC_ERR("No memory for priv CTXT"); 2604 return -ENOMEM; 2605 } 2606 2607 priv->fle_pool = dev_priv->fle_pool; 2608 flc = &priv->flc_desc[0].flc; 2609 2610 /* find xfrm types */ 2611 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2612 cipher_xform = &xform->cipher; 2613 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2614 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2615 session->ext_params.aead_ctxt.auth_cipher_text = true; 2616 cipher_xform = &xform->cipher; 2617 auth_xform = &xform->next->auth; 2618 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2619 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2620 session->ext_params.aead_ctxt.auth_cipher_text = false; 2621 cipher_xform = &xform->next->cipher; 2622 auth_xform = &xform->auth; 2623 } else { 2624 DPAA2_SEC_ERR("Invalid crypto type"); 2625 return -EINVAL; 2626 } 2627 2628 session->ctxt_type = DPAA2_SEC_PDCP; 2629 if (cipher_xform) { 2630 session->cipher_key.data = rte_zmalloc(NULL, 2631 cipher_xform->key.length, 2632 RTE_CACHE_LINE_SIZE); 2633 if (session->cipher_key.data == NULL && 2634 cipher_xform->key.length > 0) { 2635 DPAA2_SEC_ERR("No Memory for cipher key"); 2636 rte_free(priv); 2637 return -ENOMEM; 2638 } 2639 session->cipher_key.length = cipher_xform->key.length; 2640 memcpy(session->cipher_key.data, cipher_xform->key.data, 2641 cipher_xform->key.length); 2642 session->dir = 2643 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2644 DIR_ENC : DIR_DEC; 2645 session->cipher_alg = cipher_xform->algo; 2646 } else { 2647 session->cipher_key.data = NULL; 2648 session->cipher_key.length = 0; 2649 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2650 session->dir = DIR_ENC; 2651 } 2652 2653 session->pdcp.domain = pdcp_xform->domain; 2654 session->pdcp.bearer = pdcp_xform->bearer; 2655 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2656 session->pdcp.sn_size = pdcp_xform->sn_size; 2657 #ifdef ENABLE_HFN_OVERRIDE 2658 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd; 2659 #endif 2660 session->pdcp.hfn = pdcp_xform->hfn; 2661 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 2662 2663 cipherdata.key = (size_t)session->cipher_key.data; 2664 cipherdata.keylen = session->cipher_key.length; 2665 cipherdata.key_enc_flags = 0; 2666 cipherdata.key_type = RTA_DATA_IMM; 2667 2668 switch (session->cipher_alg) { 2669 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2670 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 2671 break; 2672 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2673 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 2674 break; 2675 case RTE_CRYPTO_CIPHER_AES_CTR: 2676 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 2677 break; 2678 case RTE_CRYPTO_CIPHER_NULL: 2679 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 2680 break; 2681 default: 2682 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2683 session->cipher_alg); 2684 goto out; 2685 } 2686 2687 /* Auth is only applicable for control mode operation. */ 2688 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2689 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) { 2690 DPAA2_SEC_ERR( 2691 "PDCP Seq Num size should be 5 bits for cmode"); 2692 goto out; 2693 } 2694 if (auth_xform) { 2695 session->auth_key.data = rte_zmalloc(NULL, 2696 auth_xform->key.length, 2697 RTE_CACHE_LINE_SIZE); 2698 if (session->auth_key.data == NULL && 2699 auth_xform->key.length > 0) { 2700 DPAA2_SEC_ERR("No Memory for auth key"); 2701 rte_free(session->cipher_key.data); 2702 rte_free(priv); 2703 return -ENOMEM; 2704 } 2705 session->auth_key.length = auth_xform->key.length; 2706 memcpy(session->auth_key.data, auth_xform->key.data, 2707 auth_xform->key.length); 2708 session->auth_alg = auth_xform->algo; 2709 } else { 2710 session->auth_key.data = NULL; 2711 session->auth_key.length = 0; 2712 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2713 } 2714 authdata.key = (size_t)session->auth_key.data; 2715 authdata.keylen = session->auth_key.length; 2716 authdata.key_enc_flags = 0; 2717 authdata.key_type = RTA_DATA_IMM; 2718 2719 switch (session->auth_alg) { 2720 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2721 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 2722 break; 2723 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2724 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 2725 break; 2726 case RTE_CRYPTO_AUTH_AES_CMAC: 2727 authdata.algtype = PDCP_AUTH_TYPE_AES; 2728 break; 2729 case RTE_CRYPTO_AUTH_NULL: 2730 authdata.algtype = PDCP_AUTH_TYPE_NULL; 2731 break; 2732 default: 2733 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2734 session->auth_alg); 2735 goto out; 2736 } 2737 2738 if (session->dir == DIR_ENC) 2739 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 2740 priv->flc_desc[0].desc, 1, swap, 2741 pdcp_xform->hfn, 2742 pdcp_xform->bearer, 2743 pdcp_xform->pkt_dir, 2744 pdcp_xform->hfn_threshold, 2745 &cipherdata, &authdata, 2746 0); 2747 else if (session->dir == DIR_DEC) 2748 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 2749 priv->flc_desc[0].desc, 1, swap, 2750 pdcp_xform->hfn, 2751 pdcp_xform->bearer, 2752 pdcp_xform->pkt_dir, 2753 pdcp_xform->hfn_threshold, 2754 &cipherdata, &authdata, 2755 0); 2756 } else { 2757 if (session->dir == DIR_ENC) 2758 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 2759 priv->flc_desc[0].desc, 1, swap, 2760 (enum pdcp_sn_size)pdcp_xform->sn_size, 2761 pdcp_xform->hfn, 2762 pdcp_xform->bearer, 2763 pdcp_xform->pkt_dir, 2764 pdcp_xform->hfn_threshold, 2765 &cipherdata, 0); 2766 else if (session->dir == DIR_DEC) 2767 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 2768 priv->flc_desc[0].desc, 1, swap, 2769 (enum pdcp_sn_size)pdcp_xform->sn_size, 2770 pdcp_xform->hfn, 2771 pdcp_xform->bearer, 2772 pdcp_xform->pkt_dir, 2773 pdcp_xform->hfn_threshold, 2774 &cipherdata, 0); 2775 } 2776 2777 if (bufsize < 0) { 2778 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2779 goto out; 2780 } 2781 2782 /* Enable the stashing control bit */ 2783 DPAA2_SET_FLC_RSC(flc); 2784 flc->word2_rflc_31_0 = lower_32_bits( 2785 (size_t)&(((struct dpaa2_sec_qp *) 2786 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2787 flc->word3_rflc_63_32 = upper_32_bits( 2788 (size_t)&(((struct dpaa2_sec_qp *) 2789 dev->data->queue_pairs[0])->rx_vq)); 2790 2791 flc->word1_sdl = (uint8_t)bufsize; 2792 2793 /* Set EWS bit i.e. enable write-safe */ 2794 DPAA2_SET_FLC_EWS(flc); 2795 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2796 DPAA2_SET_FLC_REUSE_BS(flc); 2797 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2798 DPAA2_SET_FLC_REUSE_FF(flc); 2799 2800 session->ctxt = priv; 2801 2802 return 0; 2803 out: 2804 rte_free(session->auth_key.data); 2805 rte_free(session->cipher_key.data); 2806 rte_free(priv); 2807 return -1; 2808 } 2809 2810 static int 2811 dpaa2_sec_security_session_create(void *dev, 2812 struct rte_security_session_conf *conf, 2813 struct rte_security_session *sess, 2814 struct rte_mempool *mempool) 2815 { 2816 void *sess_private_data; 2817 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2818 int ret; 2819 2820 if (rte_mempool_get(mempool, &sess_private_data)) { 2821 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2822 return -ENOMEM; 2823 } 2824 2825 switch (conf->protocol) { 2826 case RTE_SECURITY_PROTOCOL_IPSEC: 2827 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 2828 sess_private_data); 2829 break; 2830 case RTE_SECURITY_PROTOCOL_MACSEC: 2831 return -ENOTSUP; 2832 case RTE_SECURITY_PROTOCOL_PDCP: 2833 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 2834 sess_private_data); 2835 break; 2836 default: 2837 return -EINVAL; 2838 } 2839 if (ret != 0) { 2840 DPAA2_SEC_ERR("Failed to configure session parameters"); 2841 /* Return session to mempool */ 2842 rte_mempool_put(mempool, sess_private_data); 2843 return ret; 2844 } 2845 2846 set_sec_session_private_data(sess, sess_private_data); 2847 2848 return ret; 2849 } 2850 2851 /** Clear the memory of session so it doesn't leave key material behind */ 2852 static int 2853 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 2854 struct rte_security_session *sess) 2855 { 2856 PMD_INIT_FUNC_TRACE(); 2857 void *sess_priv = get_sec_session_private_data(sess); 2858 2859 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2860 2861 if (sess_priv) { 2862 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2863 2864 rte_free(s->ctxt); 2865 rte_free(s->cipher_key.data); 2866 rte_free(s->auth_key.data); 2867 memset(sess, 0, sizeof(dpaa2_sec_session)); 2868 set_sec_session_private_data(sess, NULL); 2869 rte_mempool_put(sess_mp, sess_priv); 2870 } 2871 return 0; 2872 } 2873 2874 static int 2875 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 2876 struct rte_crypto_sym_xform *xform, 2877 struct rte_cryptodev_sym_session *sess, 2878 struct rte_mempool *mempool) 2879 { 2880 void *sess_private_data; 2881 int ret; 2882 2883 if (rte_mempool_get(mempool, &sess_private_data)) { 2884 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2885 return -ENOMEM; 2886 } 2887 2888 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 2889 if (ret != 0) { 2890 DPAA2_SEC_ERR("Failed to configure session parameters"); 2891 /* Return session to mempool */ 2892 rte_mempool_put(mempool, sess_private_data); 2893 return ret; 2894 } 2895 2896 set_sym_session_private_data(sess, dev->driver_id, 2897 sess_private_data); 2898 2899 return 0; 2900 } 2901 2902 /** Clear the memory of session so it doesn't leave key material behind */ 2903 static void 2904 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 2905 struct rte_cryptodev_sym_session *sess) 2906 { 2907 PMD_INIT_FUNC_TRACE(); 2908 uint8_t index = dev->driver_id; 2909 void *sess_priv = get_sym_session_private_data(sess, index); 2910 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2911 2912 if (sess_priv) { 2913 rte_free(s->ctxt); 2914 rte_free(s->cipher_key.data); 2915 rte_free(s->auth_key.data); 2916 memset(sess, 0, sizeof(dpaa2_sec_session)); 2917 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2918 set_sym_session_private_data(sess, index, NULL); 2919 rte_mempool_put(sess_mp, sess_priv); 2920 } 2921 } 2922 2923 static int 2924 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 2925 struct rte_cryptodev_config *config __rte_unused) 2926 { 2927 PMD_INIT_FUNC_TRACE(); 2928 2929 return 0; 2930 } 2931 2932 static int 2933 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 2934 { 2935 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2936 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2937 struct dpseci_attr attr; 2938 struct dpaa2_queue *dpaa2_q; 2939 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2940 dev->data->queue_pairs; 2941 struct dpseci_rx_queue_attr rx_attr; 2942 struct dpseci_tx_queue_attr tx_attr; 2943 int ret, i; 2944 2945 PMD_INIT_FUNC_TRACE(); 2946 2947 memset(&attr, 0, sizeof(struct dpseci_attr)); 2948 2949 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 2950 if (ret) { 2951 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 2952 priv->hw_id); 2953 goto get_attr_failure; 2954 } 2955 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 2956 if (ret) { 2957 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 2958 goto get_attr_failure; 2959 } 2960 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 2961 dpaa2_q = &qp[i]->rx_vq; 2962 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2963 &rx_attr); 2964 dpaa2_q->fqid = rx_attr.fqid; 2965 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 2966 } 2967 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 2968 dpaa2_q = &qp[i]->tx_vq; 2969 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2970 &tx_attr); 2971 dpaa2_q->fqid = tx_attr.fqid; 2972 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 2973 } 2974 2975 return 0; 2976 get_attr_failure: 2977 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2978 return -1; 2979 } 2980 2981 static void 2982 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 2983 { 2984 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2985 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2986 int ret; 2987 2988 PMD_INIT_FUNC_TRACE(); 2989 2990 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2991 if (ret) { 2992 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 2993 priv->hw_id); 2994 return; 2995 } 2996 2997 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 2998 if (ret < 0) { 2999 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 3000 return; 3001 } 3002 } 3003 3004 static int 3005 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 3006 { 3007 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3008 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3009 int ret; 3010 3011 PMD_INIT_FUNC_TRACE(); 3012 3013 /* Function is reverse of dpaa2_sec_dev_init. 3014 * It does the following: 3015 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 3016 * 2. Close the DPSECI device 3017 * 3. Free the allocated resources. 3018 */ 3019 3020 /*Close the device at underlying layer*/ 3021 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 3022 if (ret) { 3023 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 3024 return -1; 3025 } 3026 3027 /*Free the allocated memory for ethernet private data and dpseci*/ 3028 priv->hw = NULL; 3029 rte_free(dpseci); 3030 3031 return 0; 3032 } 3033 3034 static void 3035 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 3036 struct rte_cryptodev_info *info) 3037 { 3038 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3039 3040 PMD_INIT_FUNC_TRACE(); 3041 if (info != NULL) { 3042 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 3043 info->feature_flags = dev->feature_flags; 3044 info->capabilities = dpaa2_sec_capabilities; 3045 /* No limit of number of sessions */ 3046 info->sym.max_nb_sessions = 0; 3047 info->driver_id = cryptodev_driver_id; 3048 } 3049 } 3050 3051 static 3052 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 3053 struct rte_cryptodev_stats *stats) 3054 { 3055 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3056 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3057 struct dpseci_sec_counters counters = {0}; 3058 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3059 dev->data->queue_pairs; 3060 int ret, i; 3061 3062 PMD_INIT_FUNC_TRACE(); 3063 if (stats == NULL) { 3064 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 3065 return; 3066 } 3067 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3068 if (qp[i] == NULL) { 3069 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3070 continue; 3071 } 3072 3073 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 3074 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 3075 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 3076 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 3077 } 3078 3079 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 3080 &counters); 3081 if (ret) { 3082 DPAA2_SEC_ERR("SEC counters failed"); 3083 } else { 3084 DPAA2_SEC_INFO("dpseci hardware stats:" 3085 "\n\tNum of Requests Dequeued = %" PRIu64 3086 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 3087 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 3088 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 3089 "\n\tNum of Outbound Bytes Protected = %" PRIu64 3090 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 3091 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 3092 counters.dequeued_requests, 3093 counters.ob_enc_requests, 3094 counters.ib_dec_requests, 3095 counters.ob_enc_bytes, 3096 counters.ob_prot_bytes, 3097 counters.ib_dec_bytes, 3098 counters.ib_valid_bytes); 3099 } 3100 } 3101 3102 static 3103 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 3104 { 3105 int i; 3106 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 3107 (dev->data->queue_pairs); 3108 3109 PMD_INIT_FUNC_TRACE(); 3110 3111 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3112 if (qp[i] == NULL) { 3113 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3114 continue; 3115 } 3116 qp[i]->tx_vq.rx_pkts = 0; 3117 qp[i]->tx_vq.tx_pkts = 0; 3118 qp[i]->tx_vq.err_pkts = 0; 3119 qp[i]->rx_vq.rx_pkts = 0; 3120 qp[i]->rx_vq.tx_pkts = 0; 3121 qp[i]->rx_vq.err_pkts = 0; 3122 } 3123 } 3124 3125 static void __attribute__((hot)) 3126 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3127 const struct qbman_fd *fd, 3128 const struct qbman_result *dq, 3129 struct dpaa2_queue *rxq, 3130 struct rte_event *ev) 3131 { 3132 /* Prefetching mbuf */ 3133 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3134 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3135 3136 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3137 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3138 3139 ev->flow_id = rxq->ev.flow_id; 3140 ev->sub_event_type = rxq->ev.sub_event_type; 3141 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3142 ev->op = RTE_EVENT_OP_NEW; 3143 ev->sched_type = rxq->ev.sched_type; 3144 ev->queue_id = rxq->ev.queue_id; 3145 ev->priority = rxq->ev.priority; 3146 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *) 3147 (rxq->dev))->driver_id); 3148 3149 qbman_swp_dqrr_consume(swp, dq); 3150 } 3151 static void 3152 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 3153 const struct qbman_fd *fd, 3154 const struct qbman_result *dq, 3155 struct dpaa2_queue *rxq, 3156 struct rte_event *ev) 3157 { 3158 uint8_t dqrr_index; 3159 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3160 /* Prefetching mbuf */ 3161 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3162 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3163 3164 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3165 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3166 3167 ev->flow_id = rxq->ev.flow_id; 3168 ev->sub_event_type = rxq->ev.sub_event_type; 3169 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3170 ev->op = RTE_EVENT_OP_NEW; 3171 ev->sched_type = rxq->ev.sched_type; 3172 ev->queue_id = rxq->ev.queue_id; 3173 ev->priority = rxq->ev.priority; 3174 3175 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *) 3176 (rxq->dev))->driver_id); 3177 dqrr_index = qbman_get_dqrr_idx(dq); 3178 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3179 DPAA2_PER_LCORE_DQRR_SIZE++; 3180 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3181 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3182 } 3183 3184 int 3185 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3186 int qp_id, 3187 uint16_t dpcon_id, 3188 const struct rte_event *event) 3189 { 3190 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3191 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3192 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3193 struct dpseci_rx_queue_cfg cfg; 3194 int ret; 3195 3196 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3197 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3198 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3199 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3200 else 3201 return -EINVAL; 3202 3203 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3204 cfg.options = DPSECI_QUEUE_OPT_DEST; 3205 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3206 cfg.dest_cfg.dest_id = dpcon_id; 3207 cfg.dest_cfg.priority = event->priority; 3208 3209 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3210 cfg.user_ctx = (size_t)(qp); 3211 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3212 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3213 cfg.order_preservation_en = 1; 3214 } 3215 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3216 qp_id, &cfg); 3217 if (ret) { 3218 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3219 return ret; 3220 } 3221 3222 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3223 3224 return 0; 3225 } 3226 3227 int 3228 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3229 int qp_id) 3230 { 3231 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3232 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3233 struct dpseci_rx_queue_cfg cfg; 3234 int ret; 3235 3236 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3237 cfg.options = DPSECI_QUEUE_OPT_DEST; 3238 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3239 3240 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3241 qp_id, &cfg); 3242 if (ret) 3243 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3244 3245 return ret; 3246 } 3247 3248 static struct rte_cryptodev_ops crypto_ops = { 3249 .dev_configure = dpaa2_sec_dev_configure, 3250 .dev_start = dpaa2_sec_dev_start, 3251 .dev_stop = dpaa2_sec_dev_stop, 3252 .dev_close = dpaa2_sec_dev_close, 3253 .dev_infos_get = dpaa2_sec_dev_infos_get, 3254 .stats_get = dpaa2_sec_stats_get, 3255 .stats_reset = dpaa2_sec_stats_reset, 3256 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3257 .queue_pair_release = dpaa2_sec_queue_pair_release, 3258 .queue_pair_count = dpaa2_sec_queue_pair_count, 3259 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3260 .sym_session_configure = dpaa2_sec_sym_session_configure, 3261 .sym_session_clear = dpaa2_sec_sym_session_clear, 3262 }; 3263 3264 static const struct rte_security_capability * 3265 dpaa2_sec_capabilities_get(void *device __rte_unused) 3266 { 3267 return dpaa2_sec_security_cap; 3268 } 3269 3270 static const struct rte_security_ops dpaa2_sec_security_ops = { 3271 .session_create = dpaa2_sec_security_session_create, 3272 .session_update = NULL, 3273 .session_stats_get = NULL, 3274 .session_destroy = dpaa2_sec_security_session_destroy, 3275 .set_pkt_metadata = NULL, 3276 .capabilities_get = dpaa2_sec_capabilities_get 3277 }; 3278 3279 static int 3280 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3281 { 3282 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3283 3284 rte_free(dev->security_ctx); 3285 3286 rte_mempool_free(internals->fle_pool); 3287 3288 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3289 dev->data->name, rte_socket_id()); 3290 3291 return 0; 3292 } 3293 3294 static int 3295 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3296 { 3297 struct dpaa2_sec_dev_private *internals; 3298 struct rte_device *dev = cryptodev->device; 3299 struct rte_dpaa2_device *dpaa2_dev; 3300 struct rte_security_ctx *security_instance; 3301 struct fsl_mc_io *dpseci; 3302 uint16_t token; 3303 struct dpseci_attr attr; 3304 int retcode, hw_id; 3305 char str[20]; 3306 3307 PMD_INIT_FUNC_TRACE(); 3308 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3309 if (dpaa2_dev == NULL) { 3310 DPAA2_SEC_ERR("DPAA2 SEC device not found"); 3311 return -1; 3312 } 3313 hw_id = dpaa2_dev->object_id; 3314 3315 cryptodev->driver_id = cryptodev_driver_id; 3316 cryptodev->dev_ops = &crypto_ops; 3317 3318 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 3319 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 3320 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 3321 RTE_CRYPTODEV_FF_HW_ACCELERATED | 3322 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 3323 RTE_CRYPTODEV_FF_SECURITY | 3324 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 3325 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 3326 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 3327 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 3328 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 3329 3330 internals = cryptodev->data->dev_private; 3331 3332 /* 3333 * For secondary processes, we don't initialise any further as primary 3334 * has already done this work. Only check we don't need a different 3335 * RX function 3336 */ 3337 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 3338 DPAA2_SEC_DEBUG("Device already init by primary process"); 3339 return 0; 3340 } 3341 3342 /* Initialize security_ctx only for primary process*/ 3343 security_instance = rte_malloc("rte_security_instances_ops", 3344 sizeof(struct rte_security_ctx), 0); 3345 if (security_instance == NULL) 3346 return -ENOMEM; 3347 security_instance->device = (void *)cryptodev; 3348 security_instance->ops = &dpaa2_sec_security_ops; 3349 security_instance->sess_cnt = 0; 3350 cryptodev->security_ctx = security_instance; 3351 3352 /*Open the rte device via MC and save the handle for further use*/ 3353 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 3354 sizeof(struct fsl_mc_io), 0); 3355 if (!dpseci) { 3356 DPAA2_SEC_ERR( 3357 "Error in allocating the memory for dpsec object"); 3358 return -1; 3359 } 3360 dpseci->regs = rte_mcp_ptr_list[0]; 3361 3362 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 3363 if (retcode != 0) { 3364 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 3365 retcode); 3366 goto init_error; 3367 } 3368 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 3369 if (retcode != 0) { 3370 DPAA2_SEC_ERR( 3371 "Cannot get dpsec device attributed: Error = %x", 3372 retcode); 3373 goto init_error; 3374 } 3375 sprintf(cryptodev->data->name, "dpsec-%u", hw_id); 3376 3377 internals->max_nb_queue_pairs = attr.num_tx_queues; 3378 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 3379 internals->hw = dpseci; 3380 internals->token = token; 3381 3382 sprintf(str, "fle_pool_%d", cryptodev->data->dev_id); 3383 internals->fle_pool = rte_mempool_create((const char *)str, 3384 FLE_POOL_NUM_BUFS, 3385 FLE_POOL_BUF_SIZE, 3386 FLE_POOL_CACHE_SIZE, 0, 3387 NULL, NULL, NULL, NULL, 3388 SOCKET_ID_ANY, 0); 3389 if (!internals->fle_pool) { 3390 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 3391 goto init_error; 3392 } 3393 3394 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3395 return 0; 3396 3397 init_error: 3398 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3399 3400 /* dpaa2_sec_uninit(crypto_dev_name); */ 3401 return -EFAULT; 3402 } 3403 3404 static int 3405 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 3406 struct rte_dpaa2_device *dpaa2_dev) 3407 { 3408 struct rte_cryptodev *cryptodev; 3409 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3410 3411 int retval; 3412 3413 sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id); 3414 3415 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3416 if (cryptodev == NULL) 3417 return -ENOMEM; 3418 3419 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3420 cryptodev->data->dev_private = rte_zmalloc_socket( 3421 "cryptodev private structure", 3422 sizeof(struct dpaa2_sec_dev_private), 3423 RTE_CACHE_LINE_SIZE, 3424 rte_socket_id()); 3425 3426 if (cryptodev->data->dev_private == NULL) 3427 rte_panic("Cannot allocate memzone for private " 3428 "device data"); 3429 } 3430 3431 dpaa2_dev->cryptodev = cryptodev; 3432 cryptodev->device = &dpaa2_dev->device; 3433 3434 /* init user callbacks */ 3435 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3436 3437 /* Invoke PMD device initialization function */ 3438 retval = dpaa2_sec_dev_init(cryptodev); 3439 if (retval == 0) 3440 return 0; 3441 3442 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3443 rte_free(cryptodev->data->dev_private); 3444 3445 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 3446 3447 return -ENXIO; 3448 } 3449 3450 static int 3451 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 3452 { 3453 struct rte_cryptodev *cryptodev; 3454 int ret; 3455 3456 cryptodev = dpaa2_dev->cryptodev; 3457 if (cryptodev == NULL) 3458 return -ENODEV; 3459 3460 ret = dpaa2_sec_uninit(cryptodev); 3461 if (ret) 3462 return ret; 3463 3464 return rte_cryptodev_pmd_destroy(cryptodev); 3465 } 3466 3467 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 3468 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 3469 .drv_type = DPAA2_CRYPTO, 3470 .driver = { 3471 .name = "DPAA2 SEC PMD" 3472 }, 3473 .probe = cryptodev_dpaa2_sec_probe, 3474 .remove = cryptodev_dpaa2_sec_remove, 3475 }; 3476 3477 static struct cryptodev_driver dpaa2_sec_crypto_drv; 3478 3479 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 3480 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 3481 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 3482 3483 RTE_INIT(dpaa2_sec_init_log) 3484 { 3485 /* Bus level logs */ 3486 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); 3487 if (dpaa2_logtype_sec >= 0) 3488 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); 3489 } 3490