/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* Required types */
typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH		0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;
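/*
 * Note on the FD/FLE convention used by the build_*_fd() helpers below:
 * each helper allocates a small FLE table, stores the rte_crypto_op
 * pointer in the first (hidden) FLE and saves the session context
 * alongside it (DPAA2_FLE_SAVE_CTXT), then points the frame descriptor
 * at the following output/input FLE pair. The dequeue path walks back
 * one FLE from the FD address to recover the op (see sec_fd_to_mbuf()).
 */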
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

#ifdef ENABLE_HFN_OVERRIDE
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
	}
#endif

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
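/*
 * For the simple (non-compound) protocol FD built above there is no FLE
 * table to hide the op pointer in, so build_proto_fd() parks the mbuf's
 * buf_iova in aead.digest.phys_addr and stores the op pointer in
 * buf_iova instead; sec_simple_fd_to_mbuf() undoes this swap on dequeue.
 */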
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
			auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session
	 * ctxt. Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD-ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->digest_length +
					sess->iv.length +
					auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
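/*
 * In the GCM helpers above, auth_only_len carries the AAD length: it is
 * folded into the input/output FLE lengths and also latched into the
 * FLE/FD internal job-descriptor bits so SEC knows how much of the input
 * is authenticated-only, while on encrypt the output additionally grows
 * by the ICV (digest) that SEC appends.
 */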
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD-ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->digest_length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
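/*
 * For digest verification (DIR_DEC) the helpers above copy the received
 * ICV into scratch space just past the last SGE ("old_icv"/"old_digest")
 * and append it to the input frame list, so SEC can compare it against
 * the digest it recomputes over the payload.
 */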
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD-ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD-ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}
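/*
 * build_sec_fd() is the single dispatch point of the enqueue path: it
 * resolves the session from the op (cryptodev or rte_security session)
 * and picks the contiguous or scatter/gather FD builder based on
 * rte_pktmbuf_is_contiguous() and the session context type.
 */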
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index =
					(*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &=
						~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn =
						DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							&flags[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
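/*
 * sec_simple_fd_to_mbuf() above is the inverse of build_proto_fd(): the
 * op pointer is pulled back out of buf_iova, the saved buf_iova is
 * restored from aead.digest.phys_addr, and data_off is adjusted by the
 * per-direction data head-room (SEC_FLC_DHR_*) applied by SEC.
 */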
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD-ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}
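/*
 * The FLE table freed above must match how the enqueue side allocated
 * it: contiguous mbufs use the per-device FLE mempool, while the SG
 * builders rte_malloc() a larger FLE_SG_MEM_SIZE table, so the dequeue
 * side keys off rte_pktmbuf_is_contiguous() to pick put() vs free().
 */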
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device
	 * and VQ.
	 */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already set up, reuse it as-is. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
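/*
 * The pattern above repeats in every session-init helper below: RTA's
 * cnstr_shdsc_*() builds the shared descriptor in place, word1_sdl
 * records its length, and word2/word3 of the flow context are loaded
 * with the address of queue pair 0's rx_vq so SEC can return processed
 * frames to the right virtual queue.
 */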
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (size_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
RTE_CRYPTO_AEAD_AES_GCM; 1892 break; 1893 case RTE_CRYPTO_AEAD_AES_CCM: 1894 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 1895 aead_xform->algo); 1896 goto error_out; 1897 default: 1898 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 1899 aead_xform->algo); 1900 goto error_out; 1901 } 1902 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1903 DIR_ENC : DIR_DEC; 1904 1905 priv->flc_desc[0].desc[0] = aeaddata.keylen; 1906 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1907 MIN_JOB_DESC_SIZE, 1908 (unsigned int *)priv->flc_desc[0].desc, 1909 &priv->flc_desc[0].desc[1], 1); 1910 1911 if (err < 0) { 1912 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 1913 goto error_out; 1914 } 1915 if (priv->flc_desc[0].desc[1] & 1) { 1916 aeaddata.key_type = RTA_DATA_IMM; 1917 } else { 1918 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 1919 aeaddata.key_type = RTA_DATA_PTR; 1920 } 1921 priv->flc_desc[0].desc[0] = 0; 1922 priv->flc_desc[0].desc[1] = 0; 1923 1924 if (session->dir == DIR_ENC) 1925 bufsize = cnstr_shdsc_gcm_encap( 1926 priv->flc_desc[0].desc, 1, 0, 1927 &aeaddata, session->iv.length, 1928 session->digest_length); 1929 else 1930 bufsize = cnstr_shdsc_gcm_decap( 1931 priv->flc_desc[0].desc, 1, 0, 1932 &aeaddata, session->iv.length, 1933 session->digest_length); 1934 if (bufsize < 0) { 1935 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1936 goto error_out; 1937 } 1938 1939 flc->word1_sdl = (uint8_t)bufsize; 1940 flc->word2_rflc_31_0 = lower_32_bits( 1941 (size_t)&(((struct dpaa2_sec_qp *) 1942 dev->data->queue_pairs[0])->rx_vq)); 1943 flc->word3_rflc_63_32 = upper_32_bits( 1944 (size_t)&(((struct dpaa2_sec_qp *) 1945 dev->data->queue_pairs[0])->rx_vq)); 1946 session->ctxt = priv; 1947 for (i = 0; i < bufsize; i++) 1948 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 1949 i, priv->flc_desc[0].desc[i]); 1950 1951 return 0; 1952 1953 error_out: 1954 rte_free(session->aead_key.data); 1955 rte_free(priv); 1956 return -1; 1957 } 1958 1959 1960 static int 1961 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 1962 struct rte_crypto_sym_xform *xform, 1963 dpaa2_sec_session *session) 1964 { 1965 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1966 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1967 struct alginfo authdata, cipherdata; 1968 int bufsize, i; 1969 struct ctxt_priv *priv; 1970 struct sec_flow_context *flc; 1971 struct rte_crypto_cipher_xform *cipher_xform; 1972 struct rte_crypto_auth_xform *auth_xform; 1973 int err; 1974 1975 PMD_INIT_FUNC_TRACE(); 1976 1977 if (session->ext_params.aead_ctxt.auth_cipher_text) { 1978 cipher_xform = &xform->cipher; 1979 auth_xform = &xform->next->auth; 1980 session->ctxt_type = 1981 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1982 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 1983 } else { 1984 cipher_xform = &xform->next->cipher; 1985 auth_xform = &xform->auth; 1986 session->ctxt_type = 1987 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
1988 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 1989 } 1990 1991 /* Set IV parameters */ 1992 session->iv.offset = cipher_xform->iv.offset; 1993 session->iv.length = cipher_xform->iv.length; 1994 1995 /* For SEC AEAD only one descriptor is required */ 1996 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1997 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1998 RTE_CACHE_LINE_SIZE); 1999 if (priv == NULL) { 2000 DPAA2_SEC_ERR("No Memory for priv CTXT"); 2001 return -1; 2002 } 2003 2004 priv->fle_pool = dev_priv->fle_pool; 2005 flc = &priv->flc_desc[0].flc; 2006 2007 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 2008 RTE_CACHE_LINE_SIZE); 2009 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 2010 DPAA2_SEC_ERR("No Memory for cipher key"); 2011 rte_free(priv); 2012 return -1; 2013 } 2014 session->cipher_key.length = cipher_xform->key.length; 2015 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 2016 RTE_CACHE_LINE_SIZE); 2017 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 2018 DPAA2_SEC_ERR("No Memory for auth key"); 2019 rte_free(session->cipher_key.data); 2020 rte_free(priv); 2021 return -1; 2022 } 2023 session->auth_key.length = auth_xform->key.length; 2024 memcpy(session->cipher_key.data, cipher_xform->key.data, 2025 cipher_xform->key.length); 2026 memcpy(session->auth_key.data, auth_xform->key.data, 2027 auth_xform->key.length); 2028 2029 authdata.key = (size_t)session->auth_key.data; 2030 authdata.keylen = session->auth_key.length; 2031 authdata.key_enc_flags = 0; 2032 authdata.key_type = RTA_DATA_IMM; 2033 2034 session->digest_length = auth_xform->digest_length; 2035 2036 switch (auth_xform->algo) { 2037 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2038 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2039 authdata.algmode = OP_ALG_AAI_HMAC; 2040 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2041 break; 2042 case RTE_CRYPTO_AUTH_MD5_HMAC: 2043 authdata.algtype = OP_ALG_ALGSEL_MD5; 2044 authdata.algmode = OP_ALG_AAI_HMAC; 2045 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2046 break; 2047 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2048 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2049 authdata.algmode = OP_ALG_AAI_HMAC; 2050 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2051 break; 2052 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2053 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2054 authdata.algmode = OP_ALG_AAI_HMAC; 2055 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2056 break; 2057 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2058 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2059 authdata.algmode = OP_ALG_AAI_HMAC; 2060 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2061 break; 2062 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2063 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2064 authdata.algmode = OP_ALG_AAI_HMAC; 2065 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2066 break; 2067 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2068 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2069 case RTE_CRYPTO_AUTH_NULL: 2070 case RTE_CRYPTO_AUTH_SHA1: 2071 case RTE_CRYPTO_AUTH_SHA256: 2072 case RTE_CRYPTO_AUTH_SHA512: 2073 case RTE_CRYPTO_AUTH_SHA224: 2074 case RTE_CRYPTO_AUTH_SHA384: 2075 case RTE_CRYPTO_AUTH_MD5: 2076 case RTE_CRYPTO_AUTH_AES_GMAC: 2077 case RTE_CRYPTO_AUTH_KASUMI_F9: 2078 case RTE_CRYPTO_AUTH_AES_CMAC: 2079 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2080 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2081 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2082 auth_xform->algo); 2083 goto error_out; 2084 default: 2085 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2086 auth_xform->algo); 
2087 goto error_out; 2088 } 2089 cipherdata.key = (size_t)session->cipher_key.data; 2090 cipherdata.keylen = session->cipher_key.length; 2091 cipherdata.key_enc_flags = 0; 2092 cipherdata.key_type = RTA_DATA_IMM; 2093 2094 switch (cipher_xform->algo) { 2095 case RTE_CRYPTO_CIPHER_AES_CBC: 2096 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2097 cipherdata.algmode = OP_ALG_AAI_CBC; 2098 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2099 break; 2100 case RTE_CRYPTO_CIPHER_3DES_CBC: 2101 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2102 cipherdata.algmode = OP_ALG_AAI_CBC; 2103 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2104 break; 2105 case RTE_CRYPTO_CIPHER_AES_CTR: 2106 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2107 cipherdata.algmode = OP_ALG_AAI_CTR; 2108 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2109 break; 2110 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2111 case RTE_CRYPTO_CIPHER_NULL: 2112 case RTE_CRYPTO_CIPHER_3DES_ECB: 2113 case RTE_CRYPTO_CIPHER_AES_ECB: 2114 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2115 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2116 cipher_xform->algo); 2117 goto error_out; 2118 default: 2119 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2120 cipher_xform->algo); 2121 goto error_out; 2122 } 2123 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2124 DIR_ENC : DIR_DEC; 2125 2126 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2127 priv->flc_desc[0].desc[1] = authdata.keylen; 2128 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2129 MIN_JOB_DESC_SIZE, 2130 (unsigned int *)priv->flc_desc[0].desc, 2131 &priv->flc_desc[0].desc[2], 2); 2132 2133 if (err < 0) { 2134 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2135 goto error_out; 2136 } 2137 if (priv->flc_desc[0].desc[2] & 1) { 2138 cipherdata.key_type = RTA_DATA_IMM; 2139 } else { 2140 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2141 cipherdata.key_type = RTA_DATA_PTR; 2142 } 2143 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2144 authdata.key_type = RTA_DATA_IMM; 2145 } else { 2146 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2147 authdata.key_type = RTA_DATA_PTR; 2148 } 2149 priv->flc_desc[0].desc[0] = 0; 2150 priv->flc_desc[0].desc[1] = 0; 2151 priv->flc_desc[0].desc[2] = 0; 2152 2153 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2154 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2155 0, &cipherdata, &authdata, 2156 session->iv.length, 2157 ctxt->auth_only_len, 2158 session->digest_length, 2159 session->dir); 2160 if (bufsize < 0) { 2161 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2162 goto error_out; 2163 } 2164 } else { 2165 DPAA2_SEC_ERR("Hash before cipher not supported"); 2166 goto error_out; 2167 } 2168 2169 flc->word1_sdl = (uint8_t)bufsize; 2170 flc->word2_rflc_31_0 = lower_32_bits( 2171 (size_t)&(((struct dpaa2_sec_qp *) 2172 dev->data->queue_pairs[0])->rx_vq)); 2173 flc->word3_rflc_63_32 = upper_32_bits( 2174 (size_t)&(((struct dpaa2_sec_qp *) 2175 dev->data->queue_pairs[0])->rx_vq)); 2176 session->ctxt = priv; 2177 for (i = 0; i < bufsize; i++) 2178 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2179 i, priv->flc_desc[0].desc[i]); 2180 2181 return 0; 2182 2183 error_out: 2184 rte_free(session->cipher_key.data); 2185 rte_free(session->auth_key.data); 2186 rte_free(priv); 2187 return -1; 2188 } 2189 2190 static int 2191 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2192 struct rte_crypto_sym_xform *xform, void *sess) 2193 { 2194 dpaa2_sec_session *session = sess; 2195 2196 PMD_INIT_FUNC_TRACE(); 2197 2198 if (unlikely(sess == NULL)) 
) {
2199 DPAA2_SEC_ERR("Invalid session struct");
2200 return -1;
2201 }
2202
2203 memset(session, 0, sizeof(dpaa2_sec_session));
2204 /* Default IV length = 0 */
2205 session->iv.length = 0;
2206
2207 /* Cipher Only */
2208 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2209 session->ctxt_type = DPAA2_SEC_CIPHER;
2210 return dpaa2_sec_cipher_init(dev, xform, session);
2211
2212 /* Authentication Only */
2213 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2214 xform->next == NULL) {
2215 session->ctxt_type = DPAA2_SEC_AUTH;
2216 return dpaa2_sec_auth_init(dev, xform, session);
2217
2218 /* Cipher then Authenticate */
2219 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2220 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2221 session->ext_params.aead_ctxt.auth_cipher_text = true;
2222 return dpaa2_sec_aead_chain_init(dev, xform, session);
2223
2224 /* Authenticate then Cipher */
2225 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2226 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2227 session->ext_params.aead_ctxt.auth_cipher_text = false;
2228 return dpaa2_sec_aead_chain_init(dev, xform, session);
2229
2230 /* AEAD operation for AES-GCM kind of Algorithms */
2231 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2232 xform->next == NULL) {
2233 return dpaa2_sec_aead_init(dev, xform, session);
2234
2235 } else {
2236 DPAA2_SEC_ERR("Invalid crypto type");
2237 return -EINVAL;
2238 }
2239
2240
2241 }
2242
2243 static int
2244 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2245 dpaa2_sec_session *session,
2246 struct alginfo *aeaddata)
2247 {
2248 PMD_INIT_FUNC_TRACE();
2249
2250 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2251 RTE_CACHE_LINE_SIZE);
2252 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2253 DPAA2_SEC_ERR("No Memory for aead key");
2254 return -1;
2255 }
2256 memcpy(session->aead_key.data, aead_xform->key.data,
2257 aead_xform->key.length);
2258
2259 session->digest_length = aead_xform->digest_length;
2260 session->aead_key.length = aead_xform->key.length;
2261
2262 aeaddata->key = (size_t)session->aead_key.data;
2263 aeaddata->keylen = session->aead_key.length;
2264 aeaddata->key_enc_flags = 0;
2265 aeaddata->key_type = RTA_DATA_IMM;
2266
2267 switch (aead_xform->algo) {
2268 case RTE_CRYPTO_AEAD_AES_GCM:
2269 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2270 aeaddata->algmode = OP_ALG_AAI_GCM;
2271 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2272 break;
2273 case RTE_CRYPTO_AEAD_AES_CCM:
2274 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2275 aeaddata->algmode = OP_ALG_AAI_CCM;
2276 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2277 break;
2278 default:
2279 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2280 aead_xform->algo);
2281 return -1;
2282 }
2283 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
2284 DIR_ENC : DIR_DEC; 2285 2286 return 0; 2287 } 2288 2289 static int 2290 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2291 struct rte_crypto_auth_xform *auth_xform, 2292 dpaa2_sec_session *session, 2293 struct alginfo *cipherdata, 2294 struct alginfo *authdata) 2295 { 2296 if (cipher_xform) { 2297 session->cipher_key.data = rte_zmalloc(NULL, 2298 cipher_xform->key.length, 2299 RTE_CACHE_LINE_SIZE); 2300 if (session->cipher_key.data == NULL && 2301 cipher_xform->key.length > 0) { 2302 DPAA2_SEC_ERR("No Memory for cipher key"); 2303 return -ENOMEM; 2304 } 2305 2306 session->cipher_key.length = cipher_xform->key.length; 2307 memcpy(session->cipher_key.data, cipher_xform->key.data, 2308 cipher_xform->key.length); 2309 session->cipher_alg = cipher_xform->algo; 2310 } else { 2311 session->cipher_key.data = NULL; 2312 session->cipher_key.length = 0; 2313 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2314 } 2315 2316 if (auth_xform) { 2317 session->auth_key.data = rte_zmalloc(NULL, 2318 auth_xform->key.length, 2319 RTE_CACHE_LINE_SIZE); 2320 if (session->auth_key.data == NULL && 2321 auth_xform->key.length > 0) { 2322 DPAA2_SEC_ERR("No Memory for auth key"); 2323 return -ENOMEM; 2324 } 2325 session->auth_key.length = auth_xform->key.length; 2326 memcpy(session->auth_key.data, auth_xform->key.data, 2327 auth_xform->key.length); 2328 session->auth_alg = auth_xform->algo; 2329 } else { 2330 session->auth_key.data = NULL; 2331 session->auth_key.length = 0; 2332 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2333 } 2334 2335 authdata->key = (size_t)session->auth_key.data; 2336 authdata->keylen = session->auth_key.length; 2337 authdata->key_enc_flags = 0; 2338 authdata->key_type = RTA_DATA_IMM; 2339 switch (session->auth_alg) { 2340 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2341 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2342 authdata->algmode = OP_ALG_AAI_HMAC; 2343 break; 2344 case RTE_CRYPTO_AUTH_MD5_HMAC: 2345 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2346 authdata->algmode = OP_ALG_AAI_HMAC; 2347 break; 2348 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2349 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2350 authdata->algmode = OP_ALG_AAI_HMAC; 2351 break; 2352 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2353 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2354 authdata->algmode = OP_ALG_AAI_HMAC; 2355 break; 2356 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2357 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2358 authdata->algmode = OP_ALG_AAI_HMAC; 2359 break; 2360 case RTE_CRYPTO_AUTH_AES_CMAC: 2361 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2362 break; 2363 case RTE_CRYPTO_AUTH_NULL: 2364 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2365 break; 2366 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2367 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2368 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2369 case RTE_CRYPTO_AUTH_SHA1: 2370 case RTE_CRYPTO_AUTH_SHA256: 2371 case RTE_CRYPTO_AUTH_SHA512: 2372 case RTE_CRYPTO_AUTH_SHA224: 2373 case RTE_CRYPTO_AUTH_SHA384: 2374 case RTE_CRYPTO_AUTH_MD5: 2375 case RTE_CRYPTO_AUTH_AES_GMAC: 2376 case RTE_CRYPTO_AUTH_KASUMI_F9: 2377 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2378 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2379 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2380 session->auth_alg); 2381 return -1; 2382 default: 2383 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2384 session->auth_alg); 2385 return -1; 2386 } 2387 cipherdata->key = (size_t)session->cipher_key.data; 2388 cipherdata->keylen = session->cipher_key.length; 2389 cipherdata->key_enc_flags = 0; 2390 cipherdata->key_type = 
RTA_DATA_IMM; 2391 2392 switch (session->cipher_alg) { 2393 case RTE_CRYPTO_CIPHER_AES_CBC: 2394 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2395 cipherdata->algmode = OP_ALG_AAI_CBC; 2396 break; 2397 case RTE_CRYPTO_CIPHER_3DES_CBC: 2398 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2399 cipherdata->algmode = OP_ALG_AAI_CBC; 2400 break; 2401 case RTE_CRYPTO_CIPHER_AES_CTR: 2402 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2403 cipherdata->algmode = OP_ALG_AAI_CTR; 2404 break; 2405 case RTE_CRYPTO_CIPHER_NULL: 2406 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2407 break; 2408 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2409 case RTE_CRYPTO_CIPHER_3DES_ECB: 2410 case RTE_CRYPTO_CIPHER_AES_ECB: 2411 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2412 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2413 session->cipher_alg); 2414 return -1; 2415 default: 2416 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2417 session->cipher_alg); 2418 return -1; 2419 } 2420 2421 return 0; 2422 } 2423 2424 #ifdef RTE_LIBRTE_SECURITY_TEST 2425 static uint8_t aes_cbc_iv[] = { 2426 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2427 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2428 #endif 2429 2430 static int 2431 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2432 struct rte_security_session_conf *conf, 2433 void *sess) 2434 { 2435 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2436 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2437 struct rte_crypto_auth_xform *auth_xform = NULL; 2438 struct rte_crypto_aead_xform *aead_xform = NULL; 2439 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2440 struct ctxt_priv *priv; 2441 struct ipsec_encap_pdb encap_pdb; 2442 struct ipsec_decap_pdb decap_pdb; 2443 struct alginfo authdata, cipherdata; 2444 int bufsize; 2445 struct sec_flow_context *flc; 2446 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2447 int ret = -1; 2448 2449 PMD_INIT_FUNC_TRACE(); 2450 2451 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2452 sizeof(struct ctxt_priv) + 2453 sizeof(struct sec_flc_desc), 2454 RTE_CACHE_LINE_SIZE); 2455 2456 if (priv == NULL) { 2457 DPAA2_SEC_ERR("No memory for priv CTXT"); 2458 return -ENOMEM; 2459 } 2460 2461 priv->fle_pool = dev_priv->fle_pool; 2462 flc = &priv->flc_desc[0].flc; 2463 2464 memset(session, 0, sizeof(dpaa2_sec_session)); 2465 2466 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2467 cipher_xform = &conf->crypto_xform->cipher; 2468 if (conf->crypto_xform->next) 2469 auth_xform = &conf->crypto_xform->next->auth; 2470 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2471 session, &cipherdata, &authdata); 2472 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2473 auth_xform = &conf->crypto_xform->auth; 2474 if (conf->crypto_xform->next) 2475 cipher_xform = &conf->crypto_xform->next->cipher; 2476 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2477 session, &cipherdata, &authdata); 2478 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2479 aead_xform = &conf->crypto_xform->aead; 2480 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2481 session, &cipherdata); 2482 } else { 2483 DPAA2_SEC_ERR("XFORM not specified"); 2484 ret = -EINVAL; 2485 goto out; 2486 } 2487 if (ret) { 2488 DPAA2_SEC_ERR("Failed to process xform"); 2489 goto out; 2490 } 2491 2492 session->ctxt_type = DPAA2_SEC_IPSEC; 2493 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2494 struct ip ip4_hdr; 2495 2496 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2497 ip4_hdr.ip_v = IPVERSION; 2498 
ip4_hdr.ip_hl = 5; 2499 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2500 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2501 ip4_hdr.ip_id = 0; 2502 ip4_hdr.ip_off = 0; 2503 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2504 ip4_hdr.ip_p = IPPROTO_ESP; 2505 ip4_hdr.ip_sum = 0; 2506 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2507 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2508 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr, 2509 sizeof(struct ip)); 2510 2511 /* For Sec Proto only one descriptor is required. */ 2512 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2513 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2514 PDBOPTS_ESP_OIHI_PDB_INL | 2515 PDBOPTS_ESP_IVSRC | 2516 PDBHMO_ESP_ENCAP_DTTL | 2517 PDBHMO_ESP_SNR; 2518 encap_pdb.spi = ipsec_xform->spi; 2519 encap_pdb.ip_hdr_len = sizeof(struct ip); 2520 2521 session->dir = DIR_ENC; 2522 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2523 1, 0, SHR_SERIAL, &encap_pdb, 2524 (uint8_t *)&ip4_hdr, 2525 &cipherdata, &authdata); 2526 } else if (ipsec_xform->direction == 2527 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2528 flc->dhr = SEC_FLC_DHR_INBOUND; 2529 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2530 decap_pdb.options = sizeof(struct ip) << 16; 2531 session->dir = DIR_DEC; 2532 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2533 1, 0, SHR_SERIAL, 2534 &decap_pdb, &cipherdata, &authdata); 2535 } else 2536 goto out; 2537 2538 if (bufsize < 0) { 2539 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2540 goto out; 2541 } 2542 2543 flc->word1_sdl = (uint8_t)bufsize; 2544 2545 /* Enable the stashing control bit */ 2546 DPAA2_SET_FLC_RSC(flc); 2547 flc->word2_rflc_31_0 = lower_32_bits( 2548 (size_t)&(((struct dpaa2_sec_qp *) 2549 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2550 flc->word3_rflc_63_32 = upper_32_bits( 2551 (size_t)&(((struct dpaa2_sec_qp *) 2552 dev->data->queue_pairs[0])->rx_vq)); 2553 2554 /* Set EWS bit i.e. 
enable write-safe */ 2555 DPAA2_SET_FLC_EWS(flc); 2556 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2557 DPAA2_SET_FLC_REUSE_BS(flc); 2558 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2559 DPAA2_SET_FLC_REUSE_FF(flc); 2560 2561 session->ctxt = priv; 2562 2563 return 0; 2564 out: 2565 rte_free(session->auth_key.data); 2566 rte_free(session->cipher_key.data); 2567 rte_free(priv); 2568 return ret; 2569 } 2570 2571 static int 2572 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, 2573 struct rte_security_session_conf *conf, 2574 void *sess) 2575 { 2576 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; 2577 struct rte_crypto_sym_xform *xform = conf->crypto_xform; 2578 struct rte_crypto_auth_xform *auth_xform = NULL; 2579 struct rte_crypto_cipher_xform *cipher_xform; 2580 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2581 struct ctxt_priv *priv; 2582 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2583 struct alginfo authdata, cipherdata; 2584 int bufsize = -1; 2585 struct sec_flow_context *flc; 2586 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN 2587 int swap = true; 2588 #else 2589 int swap = false; 2590 #endif 2591 2592 PMD_INIT_FUNC_TRACE(); 2593 2594 memset(session, 0, sizeof(dpaa2_sec_session)); 2595 2596 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2597 sizeof(struct ctxt_priv) + 2598 sizeof(struct sec_flc_desc), 2599 RTE_CACHE_LINE_SIZE); 2600 2601 if (priv == NULL) { 2602 DPAA2_SEC_ERR("No memory for priv CTXT"); 2603 return -ENOMEM; 2604 } 2605 2606 priv->fle_pool = dev_priv->fle_pool; 2607 flc = &priv->flc_desc[0].flc; 2608 2609 /* find xfrm types */ 2610 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2611 cipher_xform = &xform->cipher; 2612 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2613 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2614 session->ext_params.aead_ctxt.auth_cipher_text = true; 2615 cipher_xform = &xform->cipher; 2616 auth_xform = &xform->next->auth; 2617 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2618 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2619 session->ext_params.aead_ctxt.auth_cipher_text = false; 2620 cipher_xform = &xform->next->cipher; 2621 auth_xform = &xform->auth; 2622 } else { 2623 DPAA2_SEC_ERR("Invalid crypto type"); 2624 return -EINVAL; 2625 } 2626 2627 session->ctxt_type = DPAA2_SEC_PDCP; 2628 if (cipher_xform) { 2629 session->cipher_key.data = rte_zmalloc(NULL, 2630 cipher_xform->key.length, 2631 RTE_CACHE_LINE_SIZE); 2632 if (session->cipher_key.data == NULL && 2633 cipher_xform->key.length > 0) { 2634 DPAA2_SEC_ERR("No Memory for cipher key"); 2635 rte_free(priv); 2636 return -ENOMEM; 2637 } 2638 session->cipher_key.length = cipher_xform->key.length; 2639 memcpy(session->cipher_key.data, cipher_xform->key.data, 2640 cipher_xform->key.length); 2641 session->dir = 2642 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 
2643 DIR_ENC : DIR_DEC; 2644 session->cipher_alg = cipher_xform->algo; 2645 } else { 2646 session->cipher_key.data = NULL; 2647 session->cipher_key.length = 0; 2648 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2649 session->dir = DIR_ENC; 2650 } 2651 2652 session->pdcp.domain = pdcp_xform->domain; 2653 session->pdcp.bearer = pdcp_xform->bearer; 2654 session->pdcp.pkt_dir = pdcp_xform->pkt_dir; 2655 session->pdcp.sn_size = pdcp_xform->sn_size; 2656 #ifdef ENABLE_HFN_OVERRIDE 2657 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd; 2658 #endif 2659 session->pdcp.hfn = pdcp_xform->hfn; 2660 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; 2661 2662 cipherdata.key = (size_t)session->cipher_key.data; 2663 cipherdata.keylen = session->cipher_key.length; 2664 cipherdata.key_enc_flags = 0; 2665 cipherdata.key_type = RTA_DATA_IMM; 2666 2667 switch (session->cipher_alg) { 2668 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2669 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; 2670 break; 2671 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 2672 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; 2673 break; 2674 case RTE_CRYPTO_CIPHER_AES_CTR: 2675 cipherdata.algtype = PDCP_CIPHER_TYPE_AES; 2676 break; 2677 case RTE_CRYPTO_CIPHER_NULL: 2678 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; 2679 break; 2680 default: 2681 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2682 session->cipher_alg); 2683 goto out; 2684 } 2685 2686 /* Auth is only applicable for control mode operation. */ 2687 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { 2688 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) { 2689 DPAA2_SEC_ERR( 2690 "PDCP Seq Num size should be 5 bits for cmode"); 2691 goto out; 2692 } 2693 if (auth_xform) { 2694 session->auth_key.data = rte_zmalloc(NULL, 2695 auth_xform->key.length, 2696 RTE_CACHE_LINE_SIZE); 2697 if (session->auth_key.data == NULL && 2698 auth_xform->key.length > 0) { 2699 DPAA2_SEC_ERR("No Memory for auth key"); 2700 rte_free(session->cipher_key.data); 2701 rte_free(priv); 2702 return -ENOMEM; 2703 } 2704 session->auth_key.length = auth_xform->key.length; 2705 memcpy(session->auth_key.data, auth_xform->key.data, 2706 auth_xform->key.length); 2707 session->auth_alg = auth_xform->algo; 2708 } else { 2709 session->auth_key.data = NULL; 2710 session->auth_key.length = 0; 2711 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2712 } 2713 authdata.key = (size_t)session->auth_key.data; 2714 authdata.keylen = session->auth_key.length; 2715 authdata.key_enc_flags = 0; 2716 authdata.key_type = RTA_DATA_IMM; 2717 2718 switch (session->auth_alg) { 2719 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2720 authdata.algtype = PDCP_AUTH_TYPE_SNOW; 2721 break; 2722 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2723 authdata.algtype = PDCP_AUTH_TYPE_ZUC; 2724 break; 2725 case RTE_CRYPTO_AUTH_AES_CMAC: 2726 authdata.algtype = PDCP_AUTH_TYPE_AES; 2727 break; 2728 case RTE_CRYPTO_AUTH_NULL: 2729 authdata.algtype = PDCP_AUTH_TYPE_NULL; 2730 break; 2731 default: 2732 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2733 session->auth_alg); 2734 goto out; 2735 } 2736 2737 if (session->dir == DIR_ENC) 2738 bufsize = cnstr_shdsc_pdcp_c_plane_encap( 2739 priv->flc_desc[0].desc, 1, swap, 2740 pdcp_xform->hfn, 2741 pdcp_xform->bearer, 2742 pdcp_xform->pkt_dir, 2743 pdcp_xform->hfn_threshold, 2744 &cipherdata, &authdata, 2745 0); 2746 else if (session->dir == DIR_DEC) 2747 bufsize = cnstr_shdsc_pdcp_c_plane_decap( 2748 priv->flc_desc[0].desc, 1, swap, 2749 pdcp_xform->hfn, 2750 pdcp_xform->bearer, 2751 pdcp_xform->pkt_dir, 2752 pdcp_xform->hfn_threshold, 2753 
&cipherdata, &authdata, 2754 0); 2755 } else { 2756 if (session->dir == DIR_ENC) 2757 bufsize = cnstr_shdsc_pdcp_u_plane_encap( 2758 priv->flc_desc[0].desc, 1, swap, 2759 (enum pdcp_sn_size)pdcp_xform->sn_size, 2760 pdcp_xform->hfn, 2761 pdcp_xform->bearer, 2762 pdcp_xform->pkt_dir, 2763 pdcp_xform->hfn_threshold, 2764 &cipherdata, 0); 2765 else if (session->dir == DIR_DEC) 2766 bufsize = cnstr_shdsc_pdcp_u_plane_decap( 2767 priv->flc_desc[0].desc, 1, swap, 2768 (enum pdcp_sn_size)pdcp_xform->sn_size, 2769 pdcp_xform->hfn, 2770 pdcp_xform->bearer, 2771 pdcp_xform->pkt_dir, 2772 pdcp_xform->hfn_threshold, 2773 &cipherdata, 0); 2774 } 2775 2776 if (bufsize < 0) { 2777 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2778 goto out; 2779 } 2780 2781 /* Enable the stashing control bit */ 2782 DPAA2_SET_FLC_RSC(flc); 2783 flc->word2_rflc_31_0 = lower_32_bits( 2784 (size_t)&(((struct dpaa2_sec_qp *) 2785 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2786 flc->word3_rflc_63_32 = upper_32_bits( 2787 (size_t)&(((struct dpaa2_sec_qp *) 2788 dev->data->queue_pairs[0])->rx_vq)); 2789 2790 flc->word1_sdl = (uint8_t)bufsize; 2791 2792 /* Set EWS bit i.e. enable write-safe */ 2793 DPAA2_SET_FLC_EWS(flc); 2794 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2795 DPAA2_SET_FLC_REUSE_BS(flc); 2796 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2797 DPAA2_SET_FLC_REUSE_FF(flc); 2798 2799 session->ctxt = priv; 2800 2801 return 0; 2802 out: 2803 rte_free(session->auth_key.data); 2804 rte_free(session->cipher_key.data); 2805 rte_free(priv); 2806 return -1; 2807 } 2808 2809 static int 2810 dpaa2_sec_security_session_create(void *dev, 2811 struct rte_security_session_conf *conf, 2812 struct rte_security_session *sess, 2813 struct rte_mempool *mempool) 2814 { 2815 void *sess_private_data; 2816 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2817 int ret; 2818 2819 if (rte_mempool_get(mempool, &sess_private_data)) { 2820 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2821 return -ENOMEM; 2822 } 2823 2824 switch (conf->protocol) { 2825 case RTE_SECURITY_PROTOCOL_IPSEC: 2826 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 2827 sess_private_data); 2828 break; 2829 case RTE_SECURITY_PROTOCOL_MACSEC: 2830 return -ENOTSUP; 2831 case RTE_SECURITY_PROTOCOL_PDCP: 2832 ret = dpaa2_sec_set_pdcp_session(cdev, conf, 2833 sess_private_data); 2834 break; 2835 default: 2836 return -EINVAL; 2837 } 2838 if (ret != 0) { 2839 DPAA2_SEC_ERR("Failed to configure session parameters"); 2840 /* Return session to mempool */ 2841 rte_mempool_put(mempool, sess_private_data); 2842 return ret; 2843 } 2844 2845 set_sec_session_private_data(sess, sess_private_data); 2846 2847 return ret; 2848 } 2849 2850 /** Clear the memory of session so it doesn't leave key material behind */ 2851 static int 2852 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 2853 struct rte_security_session *sess) 2854 { 2855 PMD_INIT_FUNC_TRACE(); 2856 void *sess_priv = get_sec_session_private_data(sess); 2857 2858 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2859 2860 if (sess_priv) { 2861 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2862 2863 rte_free(s->ctxt); 2864 rte_free(s->cipher_key.data); 2865 rte_free(s->auth_key.data); 2866 memset(sess, 0, sizeof(dpaa2_sec_session)); 2867 set_sec_session_private_data(sess, NULL); 2868 rte_mempool_put(sess_mp, sess_priv); 2869 } 2870 return 0; 2871 } 2872 2873 static int 2874 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 2875 
struct rte_crypto_sym_xform *xform, 2876 struct rte_cryptodev_sym_session *sess, 2877 struct rte_mempool *mempool) 2878 { 2879 void *sess_private_data; 2880 int ret; 2881 2882 if (rte_mempool_get(mempool, &sess_private_data)) { 2883 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2884 return -ENOMEM; 2885 } 2886 2887 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 2888 if (ret != 0) { 2889 DPAA2_SEC_ERR("Failed to configure session parameters"); 2890 /* Return session to mempool */ 2891 rte_mempool_put(mempool, sess_private_data); 2892 return ret; 2893 } 2894 2895 set_sym_session_private_data(sess, dev->driver_id, 2896 sess_private_data); 2897 2898 return 0; 2899 } 2900 2901 /** Clear the memory of session so it doesn't leave key material behind */ 2902 static void 2903 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 2904 struct rte_cryptodev_sym_session *sess) 2905 { 2906 PMD_INIT_FUNC_TRACE(); 2907 uint8_t index = dev->driver_id; 2908 void *sess_priv = get_sym_session_private_data(sess, index); 2909 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2910 2911 if (sess_priv) { 2912 rte_free(s->ctxt); 2913 rte_free(s->cipher_key.data); 2914 rte_free(s->auth_key.data); 2915 memset(sess, 0, sizeof(dpaa2_sec_session)); 2916 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2917 set_sym_session_private_data(sess, index, NULL); 2918 rte_mempool_put(sess_mp, sess_priv); 2919 } 2920 } 2921 2922 static int 2923 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 2924 struct rte_cryptodev_config *config __rte_unused) 2925 { 2926 PMD_INIT_FUNC_TRACE(); 2927 2928 return 0; 2929 } 2930 2931 static int 2932 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 2933 { 2934 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2935 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2936 struct dpseci_attr attr; 2937 struct dpaa2_queue *dpaa2_q; 2938 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2939 dev->data->queue_pairs; 2940 struct dpseci_rx_queue_attr rx_attr; 2941 struct dpseci_tx_queue_attr tx_attr; 2942 int ret, i; 2943 2944 PMD_INIT_FUNC_TRACE(); 2945 2946 memset(&attr, 0, sizeof(struct dpseci_attr)); 2947 2948 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 2949 if (ret) { 2950 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 2951 priv->hw_id); 2952 goto get_attr_failure; 2953 } 2954 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 2955 if (ret) { 2956 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 2957 goto get_attr_failure; 2958 } 2959 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 2960 dpaa2_q = &qp[i]->rx_vq; 2961 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2962 &rx_attr); 2963 dpaa2_q->fqid = rx_attr.fqid; 2964 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 2965 } 2966 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 2967 dpaa2_q = &qp[i]->tx_vq; 2968 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2969 &tx_attr); 2970 dpaa2_q->fqid = tx_attr.fqid; 2971 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 2972 } 2973 2974 return 0; 2975 get_attr_failure: 2976 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2977 return -1; 2978 } 2979 2980 static void 2981 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 2982 { 2983 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2984 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2985 int ret; 2986 2987 PMD_INIT_FUNC_TRACE(); 2988 2989 ret = dpseci_disable(dpseci, CMD_PRI_LOW, 
priv->token);
2990 if (ret) {
2991 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
2992 priv->hw_id);
2993 return;
2994 }
2995
2996 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2997 if (ret < 0) {
2998 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
2999 return;
3000 }
3001 }
3002
3003 static int
3004 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3005 {
3006 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3007 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3008 int ret;
3009
3010 PMD_INIT_FUNC_TRACE();
3011
3012 /* Function is reverse of dpaa2_sec_dev_init.
3013 * It does the following:
3014 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3015 * 2. Close the DPSECI device
3016 * 3. Free the allocated resources.
3017 */
3018
3019 /* Close the device at the underlying layer */
3020 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3021 if (ret) {
3022 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3023 return -1;
3024 }
3025
3026 /* Free the allocated memory for crypto private data and dpseci */
3027 priv->hw = NULL;
3028 rte_free(dpseci);
3029
3030 return 0;
3031 }
3032
3033 static void
3034 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3035 struct rte_cryptodev_info *info)
3036 {
3037 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3038
3039 PMD_INIT_FUNC_TRACE();
3040 if (info != NULL) {
3041 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3042 info->feature_flags = dev->feature_flags;
3043 info->capabilities = dpaa2_sec_capabilities;
3044 /* No limit of number of sessions */
3045 info->sym.max_nb_sessions = 0;
3046 info->driver_id = cryptodev_driver_id;
3047 }
3048 }
3049
3050 static
3051 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3052 struct rte_cryptodev_stats *stats)
3053 {
3054 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3055 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3056 struct dpseci_sec_counters counters = {0};
3057 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3058 dev->data->queue_pairs;
3059 int ret, i;
3060
3061 PMD_INIT_FUNC_TRACE();
3062 if (stats == NULL) {
3063 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3064 return;
3065 }
3066 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3067 if (qp[i] == NULL) {
3068 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3069 continue;
3070 }
3071
3072 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3073 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3074 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3075 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3076 }
3077
3078 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3079 &counters);
3080 if (ret) {
3081 DPAA2_SEC_ERR("Reading SEC counters failed");
3082 } else {
3083 DPAA2_SEC_INFO("dpseci hardware stats:"
3084 "\n\tNum of Requests Dequeued = %" PRIu64
3085 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3086 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3087 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3088 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3089 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3090 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3091 counters.dequeued_requests,
3092 counters.ob_enc_requests,
3093 counters.ib_dec_requests,
3094 counters.ob_enc_bytes,
3095 counters.ob_prot_bytes,
3096 counters.ib_dec_bytes,
3097 counters.ib_valid_bytes);
3098 }
3099 }
3100
3101 static
3102 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3103 {
3104 int i;
3105 struct dpaa2_sec_qp 
**qp = (struct dpaa2_sec_qp **) 3106 (dev->data->queue_pairs); 3107 3108 PMD_INIT_FUNC_TRACE(); 3109 3110 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 3111 if (qp[i] == NULL) { 3112 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 3113 continue; 3114 } 3115 qp[i]->tx_vq.rx_pkts = 0; 3116 qp[i]->tx_vq.tx_pkts = 0; 3117 qp[i]->tx_vq.err_pkts = 0; 3118 qp[i]->rx_vq.rx_pkts = 0; 3119 qp[i]->rx_vq.tx_pkts = 0; 3120 qp[i]->rx_vq.err_pkts = 0; 3121 } 3122 } 3123 3124 static void __attribute__((hot)) 3125 dpaa2_sec_process_parallel_event(struct qbman_swp *swp, 3126 const struct qbman_fd *fd, 3127 const struct qbman_result *dq, 3128 struct dpaa2_queue *rxq, 3129 struct rte_event *ev) 3130 { 3131 /* Prefetching mbuf */ 3132 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3133 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3134 3135 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3136 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3137 3138 ev->flow_id = rxq->ev.flow_id; 3139 ev->sub_event_type = rxq->ev.sub_event_type; 3140 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3141 ev->op = RTE_EVENT_OP_NEW; 3142 ev->sched_type = rxq->ev.sched_type; 3143 ev->queue_id = rxq->ev.queue_id; 3144 ev->priority = rxq->ev.priority; 3145 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *) 3146 (rxq->dev))->driver_id); 3147 3148 qbman_swp_dqrr_consume(swp, dq); 3149 } 3150 static void 3151 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), 3152 const struct qbman_fd *fd, 3153 const struct qbman_result *dq, 3154 struct dpaa2_queue *rxq, 3155 struct rte_event *ev) 3156 { 3157 uint8_t dqrr_index; 3158 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr; 3159 /* Prefetching mbuf */ 3160 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)- 3161 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size)); 3162 3163 /* Prefetching ipsec crypto_op stored in priv data of mbuf */ 3164 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64)); 3165 3166 ev->flow_id = rxq->ev.flow_id; 3167 ev->sub_event_type = rxq->ev.sub_event_type; 3168 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; 3169 ev->op = RTE_EVENT_OP_NEW; 3170 ev->sched_type = rxq->ev.sched_type; 3171 ev->queue_id = rxq->ev.queue_id; 3172 ev->priority = rxq->ev.priority; 3173 3174 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *) 3175 (rxq->dev))->driver_id); 3176 dqrr_index = qbman_get_dqrr_idx(dq); 3177 crypto_op->sym->m_src->seqn = dqrr_index + 1; 3178 DPAA2_PER_LCORE_DQRR_SIZE++; 3179 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; 3180 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src; 3181 } 3182 3183 int 3184 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, 3185 int qp_id, 3186 uint16_t dpcon_id, 3187 const struct rte_event *event) 3188 { 3189 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3190 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3191 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id]; 3192 struct dpseci_rx_queue_cfg cfg; 3193 int ret; 3194 3195 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL) 3196 qp->rx_vq.cb = dpaa2_sec_process_parallel_event; 3197 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) 3198 qp->rx_vq.cb = dpaa2_sec_process_atomic_event; 3199 else 3200 return -EINVAL; 3201 3202 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3203 cfg.options = DPSECI_QUEUE_OPT_DEST; 3204 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON; 3205 cfg.dest_cfg.dest_id = dpcon_id; 3206 
cfg.dest_cfg.priority = event->priority; 3207 3208 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; 3209 cfg.user_ctx = (size_t)(qp); 3210 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { 3211 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; 3212 cfg.order_preservation_en = 1; 3213 } 3214 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3215 qp_id, &cfg); 3216 if (ret) { 3217 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3218 return ret; 3219 } 3220 3221 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event)); 3222 3223 return 0; 3224 } 3225 3226 int 3227 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev, 3228 int qp_id) 3229 { 3230 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 3231 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 3232 struct dpseci_rx_queue_cfg cfg; 3233 int ret; 3234 3235 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 3236 cfg.options = DPSECI_QUEUE_OPT_DEST; 3237 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE; 3238 3239 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 3240 qp_id, &cfg); 3241 if (ret) 3242 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret); 3243 3244 return ret; 3245 } 3246 3247 static struct rte_cryptodev_ops crypto_ops = { 3248 .dev_configure = dpaa2_sec_dev_configure, 3249 .dev_start = dpaa2_sec_dev_start, 3250 .dev_stop = dpaa2_sec_dev_stop, 3251 .dev_close = dpaa2_sec_dev_close, 3252 .dev_infos_get = dpaa2_sec_dev_infos_get, 3253 .stats_get = dpaa2_sec_stats_get, 3254 .stats_reset = dpaa2_sec_stats_reset, 3255 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 3256 .queue_pair_release = dpaa2_sec_queue_pair_release, 3257 .queue_pair_count = dpaa2_sec_queue_pair_count, 3258 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 3259 .sym_session_configure = dpaa2_sec_sym_session_configure, 3260 .sym_session_clear = dpaa2_sec_sym_session_clear, 3261 }; 3262 3263 static const struct rte_security_capability * 3264 dpaa2_sec_capabilities_get(void *device __rte_unused) 3265 { 3266 return dpaa2_sec_security_cap; 3267 } 3268 3269 static const struct rte_security_ops dpaa2_sec_security_ops = { 3270 .session_create = dpaa2_sec_security_session_create, 3271 .session_update = NULL, 3272 .session_stats_get = NULL, 3273 .session_destroy = dpaa2_sec_security_session_destroy, 3274 .set_pkt_metadata = NULL, 3275 .capabilities_get = dpaa2_sec_capabilities_get 3276 }; 3277 3278 static int 3279 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 3280 { 3281 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 3282 3283 rte_free(dev->security_ctx); 3284 3285 rte_mempool_free(internals->fle_pool); 3286 3287 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 3288 dev->data->name, rte_socket_id()); 3289 3290 return 0; 3291 } 3292 3293 static int 3294 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 3295 { 3296 struct dpaa2_sec_dev_private *internals; 3297 struct rte_device *dev = cryptodev->device; 3298 struct rte_dpaa2_device *dpaa2_dev; 3299 struct rte_security_ctx *security_instance; 3300 struct fsl_mc_io *dpseci; 3301 uint16_t token; 3302 struct dpseci_attr attr; 3303 int retcode, hw_id; 3304 char str[20]; 3305 3306 PMD_INIT_FUNC_TRACE(); 3307 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 3308 if (dpaa2_dev == NULL) { 3309 DPAA2_SEC_ERR("DPAA2 SEC device not found"); 3310 return -1; 3311 } 3312 hw_id = dpaa2_dev->object_id; 3313 3314 cryptodev->driver_id = cryptodev_driver_id; 3315 cryptodev->dev_ops = &crypto_ops; 3316 3317 cryptodev->enqueue_burst 
= dpaa2_sec_enqueue_burst;
3318 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3319 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3320 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3321 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3322 RTE_CRYPTODEV_FF_SECURITY |
3323 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3324 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3325 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3326 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3327 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3328
3329 internals = cryptodev->data->dev_private;
3330
3331 /*
3332 * For secondary processes, we don't initialise any further as primary
3333 * has already done this work. Only check we don't need a different
3334 * RX function
3335 */
3336 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3337 DPAA2_SEC_DEBUG("Device already initialized by primary process");
3338 return 0;
3339 }
3340
3341 /* Initialize security_ctx only for primary process */
3342 security_instance = rte_malloc("rte_security_instances_ops",
3343 sizeof(struct rte_security_ctx), 0);
3344 if (security_instance == NULL)
3345 return -ENOMEM;
3346 security_instance->device = (void *)cryptodev;
3347 security_instance->ops = &dpaa2_sec_security_ops;
3348 security_instance->sess_cnt = 0;
3349 cryptodev->security_ctx = security_instance;
3350
3351 /* Open the rte device via MC and save the handle for further use */
3352 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3353 sizeof(struct fsl_mc_io), 0);
3354 if (!dpseci) {
3355 DPAA2_SEC_ERR(
3356 "Error in allocating the memory for dpsec object");
3357 return -1;
3358 }
3359 dpseci->regs = rte_mcp_ptr_list[0];
3360
3361 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3362 if (retcode != 0) {
3363 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3364 retcode);
3365 goto init_error;
3366 }
3367 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3368 if (retcode != 0) {
3369 DPAA2_SEC_ERR(
3370 "Cannot get dpsec device attributes: Error = %x",
3371 retcode);
3372 goto init_error;
3373 }
3374 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3375 "dpsec-%u", hw_id);
3376
3377 internals->max_nb_queue_pairs = attr.num_tx_queues;
3378 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3379 internals->hw = dpseci;
3380 internals->token = token;
3381
3382 snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
3383 internals->fle_pool = rte_mempool_create((const char *)str,
3384 FLE_POOL_NUM_BUFS,
3385 FLE_POOL_BUF_SIZE,
3386 FLE_POOL_CACHE_SIZE, 0,
3387 NULL, NULL, NULL, NULL,
3388 SOCKET_ID_ANY, 0);
3389 if (!internals->fle_pool) {
3390 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3391 goto init_error;
3392 }
3393
3394 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3395 return 0;
3396
3397 init_error:
3398 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3399
3400 /* dpaa2_sec_uninit(crypto_dev_name); */
3401 return -EFAULT;
3402 }
3403
3404 static int
3405 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3406 struct rte_dpaa2_device *dpaa2_dev)
3407 {
3408 struct rte_cryptodev *cryptodev;
3409 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3410
3411 int retval;
3412
3413 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3414 dpaa2_dev->object_id);
3415
3416 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3417 if (cryptodev == NULL)
3418 return -ENOMEM;
3419
3420 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3421 cryptodev->data->dev_private = 
rte_zmalloc_socket( 3422 "cryptodev private structure", 3423 sizeof(struct dpaa2_sec_dev_private), 3424 RTE_CACHE_LINE_SIZE, 3425 rte_socket_id()); 3426 3427 if (cryptodev->data->dev_private == NULL) 3428 rte_panic("Cannot allocate memzone for private " 3429 "device data"); 3430 } 3431 3432 dpaa2_dev->cryptodev = cryptodev; 3433 cryptodev->device = &dpaa2_dev->device; 3434 3435 /* init user callbacks */ 3436 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3437 3438 /* Invoke PMD device initialization function */ 3439 retval = dpaa2_sec_dev_init(cryptodev); 3440 if (retval == 0) 3441 return 0; 3442 3443 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3444 rte_free(cryptodev->data->dev_private); 3445 3446 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 3447 3448 return -ENXIO; 3449 } 3450 3451 static int 3452 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 3453 { 3454 struct rte_cryptodev *cryptodev; 3455 int ret; 3456 3457 cryptodev = dpaa2_dev->cryptodev; 3458 if (cryptodev == NULL) 3459 return -ENODEV; 3460 3461 ret = dpaa2_sec_uninit(cryptodev); 3462 if (ret) 3463 return ret; 3464 3465 return rte_cryptodev_pmd_destroy(cryptodev); 3466 } 3467 3468 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 3469 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 3470 .drv_type = DPAA2_CRYPTO, 3471 .driver = { 3472 .name = "DPAA2 SEC PMD" 3473 }, 3474 .probe = cryptodev_dpaa2_sec_probe, 3475 .remove = cryptodev_dpaa2_sec_remove, 3476 }; 3477 3478 static struct cryptodev_driver dpaa2_sec_crypto_drv; 3479 3480 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 3481 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 3482 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 3483 3484 RTE_INIT(dpaa2_sec_init_log) 3485 { 3486 /* Bus level logs */ 3487 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); 3488 if (dpaa2_logtype_sec >= 0) 3489 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); 3490 } 3491
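
/*
 * Usage sketch (illustrative only, not part of the driver): a cipher-only
 * symmetric session created through the generic cryptodev API ends up in
 * dpaa2_sec_sym_session_configure() and, from there, in
 * dpaa2_sec_set_session_parameters() above. Names such as dev_id,
 * sess_pool, sess_priv_pool, key_data and IV_OFFSET are assumptions for
 * the example, not identifiers provided by this PMD.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_pool);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_pool) != 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *
 * A session built this way is attached to each rte_crypto_op with
 * rte_crypto_op_attach_sym_session() before rte_cryptodev_enqueue_burst().
 */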
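/*
 * Usage sketch (illustrative only): dpaa2_sec_set_ipsec_session() above is
 * reached through the rte_security API. A minimal egress (outbound) ESP
 * tunnel configuration could look as follows; the SPI value, aead_xform
 * and sess_pool are placeholders assumed for the example.
 *
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead_xform, /* e.g. an AES-GCM AEAD xform */
 *	};
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_pool);
 *
 * The tunnel addresses, TTL and DSCP supplied in conf.ipsec.tunnel.ipv4
 * feed the prebuilt outer IP header that dpaa2_sec_set_ipsec_session()
 * constructs for the encap PDB.
 */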
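/*
 * Usage sketch (illustrative only): the PDCP path in
 * dpaa2_sec_set_pdcp_session() above is exercised the same way, with
 * RTE_SECURITY_PROTOCOL_PDCP and a pdcp xform. The bearer and HFN values
 * below are placeholders; note that control-plane sessions require a
 * 5-bit sequence-number size, as enforced above.
 *
 *	struct rte_security_session_conf pdcp_conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x1,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_auth_chain, /* cipher + auth xforms */
 *	};
 *	struct rte_security_session *pdcp_sess =
 *		rte_security_session_create(ctx, &pdcp_conf, sess_pool);
 */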
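/*
 * Usage sketch (illustrative only): dpaa2_sec_eventq_attach() and
 * dpaa2_sec_eventq_detach() above are not called by applications directly;
 * they are expected to be reached when a crypto queue pair is bound through
 * the event crypto adapter. With placeholder ids, the binding could look
 * like:
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_queue_id,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cryptodev_id,
 *						qp_id, &ev);
 *
 * RTE_SCHED_TYPE_ATOMIC selects dpaa2_sec_process_atomic_event(), which
 * holds the DQRR entry until the event is released, while
 * RTE_SCHED_TYPE_PARALLEL selects dpaa2_sec_process_parallel_event().
 */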