1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_cryptodev.h> 13 #include <rte_security_driver.h> 14 #include <rte_malloc.h> 15 #include <rte_memcpy.h> 16 #include <rte_string_fns.h> 17 #include <rte_cycles.h> 18 #include <rte_kvargs.h> 19 #include <rte_dev.h> 20 #include <rte_cryptodev_pmd.h> 21 #include <rte_common.h> 22 #include <rte_fslmc.h> 23 #include <fslmc_vfio.h> 24 #include <dpaa2_hw_pvt.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <dpaa2_hw_mempool.h> 27 #include <fsl_dpseci.h> 28 #include <fsl_mc_sys.h> 29 30 #include "dpaa2_sec_priv.h" 31 #include "dpaa2_sec_logs.h" 32 33 /* Required types */ 34 typedef uint64_t dma_addr_t; 35 36 /* RTA header files */ 37 #include <hw/desc/ipsec.h> 38 #include <hw/desc/algo.h> 39 40 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 41 * a pointer to the shared descriptor 42 */ 43 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 44 #define FSL_VENDOR_ID 0x1957 45 #define FSL_DEVICE_ID 0x410 46 #define FSL_SUBSYSTEM_SEC 1 47 #define FSL_MC_DPSECI_DEVID 3 48 49 #define NO_PREFETCH 0 50 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 51 #define FLE_POOL_NUM_BUFS 32000 52 #define FLE_POOL_BUF_SIZE 256 53 #define FLE_POOL_CACHE_SIZE 512 54 #define FLE_SG_MEM_SIZE 2048 55 #define SEC_FLC_DHR_OUTBOUND -114 56 #define SEC_FLC_DHR_INBOUND 0 57 58 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; 59 60 static uint8_t cryptodev_driver_id; 61 62 int dpaa2_logtype_sec; 63 64 static inline int 65 build_proto_compound_fd(dpaa2_sec_session *sess, 66 struct rte_crypto_op *op, 67 struct qbman_fd *fd, uint16_t bpid) 68 { 69 struct rte_crypto_sym_op *sym_op = op->sym; 70 struct ctxt_priv *priv = sess->ctxt; 71 struct qbman_fle *fle, *ip_fle, *op_fle; 72 struct sec_flow_context *flc; 73 struct rte_mbuf *src_mbuf = sym_op->m_src; 74 struct rte_mbuf *dst_mbuf = sym_op->m_dst; 75 int retval; 76 77 /* Save the shared descriptor */ 78 flc = &priv->flc_desc[0].flc; 79 80 /* we are using the first FLE entry to store Mbuf */ 81 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 82 if (retval) { 83 DPAA2_SEC_ERR("Memory alloc failed"); 84 return -1; 85 } 86 memset(fle, 0, FLE_POOL_BUF_SIZE); 87 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 88 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 89 90 op_fle = fle + 1; 91 ip_fle = fle + 2; 92 93 if (likely(bpid < MAX_BPID)) { 94 DPAA2_SET_FD_BPID(fd, bpid); 95 DPAA2_SET_FLE_BPID(op_fle, bpid); 96 DPAA2_SET_FLE_BPID(ip_fle, bpid); 97 } else { 98 DPAA2_SET_FD_IVP(fd); 99 DPAA2_SET_FLE_IVP(op_fle); 100 DPAA2_SET_FLE_IVP(ip_fle); 101 } 102 103 /* Configure FD as a FRAME LIST */ 104 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 105 DPAA2_SET_FD_COMPOUND_FMT(fd); 106 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); 107 108 /* Configure Output FLE with dst mbuf data */ 109 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf)); 110 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off); 111 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len); 112 113 /* Configure Input FLE with src mbuf data */ 114 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf)); 115 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off); 116 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len); 117 118 DPAA2_SET_FD_LEN(fd, ip_fle->length); 119 DPAA2_SET_FLE_FIN(ip_fle); 120 121 return 0; 122 123 } 124 125 static inline int 126 build_proto_fd(dpaa2_sec_session *sess, 127 struct rte_crypto_op *op, 128 struct qbman_fd *fd, uint16_t bpid) 129 { 130 struct rte_crypto_sym_op *sym_op = op->sym; 131 if (sym_op->m_dst) 132 return build_proto_compound_fd(sess, op, fd, bpid); 133 134 struct ctxt_priv *priv = sess->ctxt; 135 struct sec_flow_context *flc; 136 struct rte_mbuf *mbuf = sym_op->m_src; 137 138 if (likely(bpid < MAX_BPID)) 139 DPAA2_SET_FD_BPID(fd, bpid); 140 else 141 DPAA2_SET_FD_IVP(fd); 142 143 /* Save the shared descriptor */ 144 flc = &priv->flc_desc[0].flc; 145 146 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 147 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 148 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 149 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); 150 151 /* save physical address of mbuf */ 152 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 153 mbuf->buf_iova = (size_t)op; 154 155 return 0; 156 } 157 158 static inline int 159 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, 160 struct rte_crypto_op *op, 161 struct qbman_fd *fd, __rte_unused uint16_t bpid) 162 { 163 struct rte_crypto_sym_op *sym_op = op->sym; 164 struct ctxt_priv *priv = sess->ctxt; 165 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 166 struct sec_flow_context *flc; 167 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 168 int icv_len = sess->digest_length; 169 uint8_t *old_icv; 170 struct rte_mbuf *mbuf; 171 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 172 sess->iv.offset); 173 174 PMD_INIT_FUNC_TRACE(); 175 176 if (sym_op->m_dst) 177 mbuf = sym_op->m_dst; 178 else 179 mbuf = sym_op->m_src; 180 181 /* first FLE entry used to store mbuf and session ctxt */ 182 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 183 RTE_CACHE_LINE_SIZE); 184 if (unlikely(!fle)) { 185 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); 186 return -1; 187 } 188 memset(fle, 0, FLE_SG_MEM_SIZE); 189 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 190 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); 191 192 op_fle = fle + 1; 193 ip_fle = fle + 2; 194 sge = fle + 3; 195 196 /* Save the shared descriptor */ 197 flc = &priv->flc_desc[0].flc; 198 199 /* Configure FD as a FRAME LIST */ 200 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 201 DPAA2_SET_FD_COMPOUND_FMT(fd); 202 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 203 204 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" 205 "iv-len=%d data_off: 0x%x\n", 206 sym_op->aead.data.offset, 207 sym_op->aead.data.length, 208 sess->digest_length, 209 sess->iv.length, 210 sym_op->m_src->data_off); 211 212 /* Configure Output FLE with Scatter/Gather Entry */ 213 DPAA2_SET_FLE_SG_EXT(op_fle); 214 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 215 216 if (auth_only_len) 217 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 218 219 op_fle->length = (sess->dir == DIR_ENC) ? 220 (sym_op->aead.data.length + icv_len + auth_only_len) : 221 sym_op->aead.data.length + auth_only_len; 222 223 /* Configure Output SGE for Encap/Decap */ 224 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 225 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset - 226 auth_only_len); 227 sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len; 228 229 mbuf = mbuf->next; 230 /* o/p segs */ 231 while (mbuf) { 232 sge++; 233 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 234 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 235 sge->length = mbuf->data_len; 236 mbuf = mbuf->next; 237 } 238 sge->length -= icv_len; 239 240 if (sess->dir == DIR_ENC) { 241 sge++; 242 DPAA2_SET_FLE_ADDR(sge, 243 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 244 sge->length = icv_len; 245 } 246 DPAA2_SET_FLE_FIN(sge); 247 248 sge++; 249 mbuf = sym_op->m_src; 250 251 /* Configure Input FLE with Scatter/Gather Entry */ 252 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 253 DPAA2_SET_FLE_SG_EXT(ip_fle); 254 DPAA2_SET_FLE_FIN(ip_fle); 255 ip_fle->length = (sess->dir == DIR_ENC) ? 256 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 257 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 258 icv_len); 259 260 /* Configure Input SGE for Encap/Decap */ 261 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 262 sge->length = sess->iv.length; 263 264 sge++; 265 if (auth_only_len) { 266 DPAA2_SET_FLE_ADDR(sge, 267 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 268 sge->length = auth_only_len; 269 sge++; 270 } 271 272 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 273 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 274 mbuf->data_off); 275 sge->length = mbuf->data_len - sym_op->aead.data.offset; 276 277 mbuf = mbuf->next; 278 /* i/p segs */ 279 while (mbuf) { 280 sge++; 281 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 282 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 283 sge->length = mbuf->data_len; 284 mbuf = mbuf->next; 285 } 286 287 if (sess->dir == DIR_DEC) { 288 sge++; 289 old_icv = (uint8_t *)(sge + 1); 290 memcpy(old_icv, sym_op->aead.digest.data, icv_len); 291 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 292 sge->length = icv_len; 293 } 294 295 DPAA2_SET_FLE_FIN(sge); 296 if (auth_only_len) { 297 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 298 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 299 } 300 DPAA2_SET_FD_LEN(fd, ip_fle->length); 301 302 return 0; 303 } 304 305 static inline int 306 build_authenc_gcm_fd(dpaa2_sec_session *sess, 307 struct rte_crypto_op *op, 308 struct qbman_fd *fd, uint16_t bpid) 309 { 310 struct rte_crypto_sym_op *sym_op = op->sym; 311 struct ctxt_priv *priv = sess->ctxt; 312 struct qbman_fle *fle, *sge; 313 struct sec_flow_context *flc; 314 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 315 int icv_len = sess->digest_length, retval; 316 uint8_t *old_icv; 317 struct rte_mbuf *dst; 318 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 319 sess->iv.offset); 320 321 PMD_INIT_FUNC_TRACE(); 322 323 if (sym_op->m_dst) 324 dst = sym_op->m_dst; 325 else 326 dst = sym_op->m_src; 327 328 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 329 * Currently we donot know which FLE has the mbuf stored. 330 * So while retreiving we can go back 1 FLE from the FD -ADDR 331 * to get the MBUF Addr from the previous FLE. 332 * We can have a better approach to use the inline Mbuf 333 */ 334 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 335 if (retval) { 336 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); 337 return -1; 338 } 339 memset(fle, 0, FLE_POOL_BUF_SIZE); 340 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 341 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 342 fle = fle + 1; 343 sge = fle + 2; 344 if (likely(bpid < MAX_BPID)) { 345 DPAA2_SET_FD_BPID(fd, bpid); 346 DPAA2_SET_FLE_BPID(fle, bpid); 347 DPAA2_SET_FLE_BPID(fle + 1, bpid); 348 DPAA2_SET_FLE_BPID(sge, bpid); 349 DPAA2_SET_FLE_BPID(sge + 1, bpid); 350 DPAA2_SET_FLE_BPID(sge + 2, bpid); 351 DPAA2_SET_FLE_BPID(sge + 3, bpid); 352 } else { 353 DPAA2_SET_FD_IVP(fd); 354 DPAA2_SET_FLE_IVP(fle); 355 DPAA2_SET_FLE_IVP((fle + 1)); 356 DPAA2_SET_FLE_IVP(sge); 357 DPAA2_SET_FLE_IVP((sge + 1)); 358 DPAA2_SET_FLE_IVP((sge + 2)); 359 DPAA2_SET_FLE_IVP((sge + 3)); 360 } 361 362 /* Save the shared descriptor */ 363 flc = &priv->flc_desc[0].flc; 364 /* Configure FD as a FRAME LIST */ 365 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 366 DPAA2_SET_FD_COMPOUND_FMT(fd); 367 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 368 369 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" 370 "iv-len=%d data_off: 0x%x\n", 371 sym_op->aead.data.offset, 372 sym_op->aead.data.length, 373 sess->digest_length, 374 sess->iv.length, 375 sym_op->m_src->data_off); 376 377 /* Configure Output FLE with Scatter/Gather Entry */ 378 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 379 if (auth_only_len) 380 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 381 fle->length = (sess->dir == DIR_ENC) ? 382 (sym_op->aead.data.length + icv_len + auth_only_len) : 383 sym_op->aead.data.length + auth_only_len; 384 385 DPAA2_SET_FLE_SG_EXT(fle); 386 387 /* Configure Output SGE for Encap/Decap */ 388 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 389 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 390 dst->data_off - auth_only_len); 391 sge->length = sym_op->aead.data.length + auth_only_len; 392 393 if (sess->dir == DIR_ENC) { 394 sge++; 395 DPAA2_SET_FLE_ADDR(sge, 396 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 397 sge->length = sess->digest_length; 398 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 399 sess->iv.length + auth_only_len)); 400 } 401 DPAA2_SET_FLE_FIN(sge); 402 403 sge++; 404 fle++; 405 406 /* Configure Input FLE with Scatter/Gather Entry */ 407 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 408 DPAA2_SET_FLE_SG_EXT(fle); 409 DPAA2_SET_FLE_FIN(fle); 410 fle->length = (sess->dir == DIR_ENC) ? 411 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 412 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 413 sess->digest_length); 414 415 /* Configure Input SGE for Encap/Decap */ 416 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 417 sge->length = sess->iv.length; 418 sge++; 419 if (auth_only_len) { 420 DPAA2_SET_FLE_ADDR(sge, 421 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 422 sge->length = auth_only_len; 423 DPAA2_SET_FLE_BPID(sge, bpid); 424 sge++; 425 } 426 427 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 428 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 429 sym_op->m_src->data_off); 430 sge->length = sym_op->aead.data.length; 431 if (sess->dir == DIR_DEC) { 432 sge++; 433 old_icv = (uint8_t *)(sge + 1); 434 memcpy(old_icv, sym_op->aead.digest.data, 435 sess->digest_length); 436 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 437 sge->length = sess->digest_length; 438 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 439 sess->digest_length + 440 sess->iv.length + 441 auth_only_len)); 442 } 443 DPAA2_SET_FLE_FIN(sge); 444 445 if (auth_only_len) { 446 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 447 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 448 } 449 450 return 0; 451 } 452 453 static inline int 454 build_authenc_sg_fd(dpaa2_sec_session *sess, 455 struct rte_crypto_op *op, 456 struct qbman_fd *fd, __rte_unused uint16_t bpid) 457 { 458 struct rte_crypto_sym_op *sym_op = op->sym; 459 struct ctxt_priv *priv = sess->ctxt; 460 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 461 struct sec_flow_context *flc; 462 uint32_t auth_only_len = sym_op->auth.data.length - 463 sym_op->cipher.data.length; 464 int icv_len = sess->digest_length; 465 uint8_t *old_icv; 466 struct rte_mbuf *mbuf; 467 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 468 sess->iv.offset); 469 470 PMD_INIT_FUNC_TRACE(); 471 472 if (sym_op->m_dst) 473 mbuf = sym_op->m_dst; 474 else 475 mbuf = sym_op->m_src; 476 477 /* first FLE entry used to store mbuf and session ctxt */ 478 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 479 RTE_CACHE_LINE_SIZE); 480 if (unlikely(!fle)) { 481 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); 482 return -1; 483 } 484 memset(fle, 0, FLE_SG_MEM_SIZE); 485 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 486 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 487 488 op_fle = fle + 1; 489 ip_fle = fle + 2; 490 sge = fle + 3; 491 492 /* Save the shared descriptor */ 493 flc = &priv->flc_desc[0].flc; 494 495 /* Configure FD as a FRAME LIST */ 496 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 497 DPAA2_SET_FD_COMPOUND_FMT(fd); 498 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 499 500 DPAA2_SEC_DP_DEBUG( 501 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" 502 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 503 sym_op->auth.data.offset, 504 sym_op->auth.data.length, 505 sess->digest_length, 506 sym_op->cipher.data.offset, 507 sym_op->cipher.data.length, 508 sess->iv.length, 509 sym_op->m_src->data_off); 510 511 /* Configure Output FLE with Scatter/Gather Entry */ 512 DPAA2_SET_FLE_SG_EXT(op_fle); 513 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 514 515 if (auth_only_len) 516 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len); 517 518 op_fle->length = (sess->dir == DIR_ENC) ? 519 (sym_op->cipher.data.length + icv_len) : 520 sym_op->cipher.data.length; 521 522 /* Configure Output SGE for Encap/Decap */ 523 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 524 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset); 525 sge->length = mbuf->data_len - sym_op->auth.data.offset; 526 527 mbuf = mbuf->next; 528 /* o/p segs */ 529 while (mbuf) { 530 sge++; 531 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 532 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 533 sge->length = mbuf->data_len; 534 mbuf = mbuf->next; 535 } 536 sge->length -= icv_len; 537 538 if (sess->dir == DIR_ENC) { 539 sge++; 540 DPAA2_SET_FLE_ADDR(sge, 541 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 542 sge->length = icv_len; 543 } 544 DPAA2_SET_FLE_FIN(sge); 545 546 sge++; 547 mbuf = sym_op->m_src; 548 549 /* Configure Input FLE with Scatter/Gather Entry */ 550 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 551 DPAA2_SET_FLE_SG_EXT(ip_fle); 552 DPAA2_SET_FLE_FIN(ip_fle); 553 ip_fle->length = (sess->dir == DIR_ENC) ? 554 (sym_op->auth.data.length + sess->iv.length) : 555 (sym_op->auth.data.length + sess->iv.length + 556 icv_len); 557 558 /* Configure Input SGE for Encap/Decap */ 559 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 560 sge->length = sess->iv.length; 561 562 sge++; 563 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 564 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 565 mbuf->data_off); 566 sge->length = mbuf->data_len - sym_op->auth.data.offset; 567 568 mbuf = mbuf->next; 569 /* i/p segs */ 570 while (mbuf) { 571 sge++; 572 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 573 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 574 sge->length = mbuf->data_len; 575 mbuf = mbuf->next; 576 } 577 sge->length -= icv_len; 578 579 if (sess->dir == DIR_DEC) { 580 sge++; 581 old_icv = (uint8_t *)(sge + 1); 582 memcpy(old_icv, sym_op->auth.digest.data, 583 icv_len); 584 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 585 sge->length = icv_len; 586 } 587 588 DPAA2_SET_FLE_FIN(sge); 589 if (auth_only_len) { 590 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len); 591 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 592 } 593 DPAA2_SET_FD_LEN(fd, ip_fle->length); 594 595 return 0; 596 } 597 598 static inline int 599 build_authenc_fd(dpaa2_sec_session *sess, 600 struct rte_crypto_op *op, 601 struct qbman_fd *fd, uint16_t bpid) 602 { 603 struct rte_crypto_sym_op *sym_op = op->sym; 604 struct ctxt_priv *priv = sess->ctxt; 605 struct qbman_fle *fle, *sge; 606 struct sec_flow_context *flc; 607 uint32_t auth_only_len = sym_op->auth.data.length - 608 sym_op->cipher.data.length; 609 int icv_len = sess->digest_length, retval; 610 uint8_t *old_icv; 611 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 612 sess->iv.offset); 613 struct rte_mbuf *dst; 614 615 PMD_INIT_FUNC_TRACE(); 616 617 if (sym_op->m_dst) 618 dst = sym_op->m_dst; 619 else 620 dst = sym_op->m_src; 621 622 /* we are using the first FLE entry to store Mbuf. 623 * Currently we donot know which FLE has the mbuf stored. 624 * So while retreiving we can go back 1 FLE from the FD -ADDR 625 * to get the MBUF Addr from the previous FLE. 626 * We can have a better approach to use the inline Mbuf 627 */ 628 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 629 if (retval) { 630 DPAA2_SEC_ERR("Memory alloc failed for SGE"); 631 return -1; 632 } 633 memset(fle, 0, FLE_POOL_BUF_SIZE); 634 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 635 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 636 fle = fle + 1; 637 sge = fle + 2; 638 if (likely(bpid < MAX_BPID)) { 639 DPAA2_SET_FD_BPID(fd, bpid); 640 DPAA2_SET_FLE_BPID(fle, bpid); 641 DPAA2_SET_FLE_BPID(fle + 1, bpid); 642 DPAA2_SET_FLE_BPID(sge, bpid); 643 DPAA2_SET_FLE_BPID(sge + 1, bpid); 644 DPAA2_SET_FLE_BPID(sge + 2, bpid); 645 DPAA2_SET_FLE_BPID(sge + 3, bpid); 646 } else { 647 DPAA2_SET_FD_IVP(fd); 648 DPAA2_SET_FLE_IVP(fle); 649 DPAA2_SET_FLE_IVP((fle + 1)); 650 DPAA2_SET_FLE_IVP(sge); 651 DPAA2_SET_FLE_IVP((sge + 1)); 652 DPAA2_SET_FLE_IVP((sge + 2)); 653 DPAA2_SET_FLE_IVP((sge + 3)); 654 } 655 656 /* Save the shared descriptor */ 657 flc = &priv->flc_desc[0].flc; 658 /* Configure FD as a FRAME LIST */ 659 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 660 DPAA2_SET_FD_COMPOUND_FMT(fd); 661 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 662 663 DPAA2_SEC_DP_DEBUG( 664 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" 665 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 666 sym_op->auth.data.offset, 667 sym_op->auth.data.length, 668 sess->digest_length, 669 sym_op->cipher.data.offset, 670 sym_op->cipher.data.length, 671 sess->iv.length, 672 sym_op->m_src->data_off); 673 674 /* Configure Output FLE with Scatter/Gather Entry */ 675 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 676 if (auth_only_len) 677 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 678 fle->length = (sess->dir == DIR_ENC) ? 679 (sym_op->cipher.data.length + icv_len) : 680 sym_op->cipher.data.length; 681 682 DPAA2_SET_FLE_SG_EXT(fle); 683 684 /* Configure Output SGE for Encap/Decap */ 685 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 686 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 687 dst->data_off); 688 sge->length = sym_op->cipher.data.length; 689 690 if (sess->dir == DIR_ENC) { 691 sge++; 692 DPAA2_SET_FLE_ADDR(sge, 693 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 694 sge->length = sess->digest_length; 695 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 696 sess->iv.length)); 697 } 698 DPAA2_SET_FLE_FIN(sge); 699 700 sge++; 701 fle++; 702 703 /* Configure Input FLE with Scatter/Gather Entry */ 704 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 705 DPAA2_SET_FLE_SG_EXT(fle); 706 DPAA2_SET_FLE_FIN(fle); 707 fle->length = (sess->dir == DIR_ENC) ? 708 (sym_op->auth.data.length + sess->iv.length) : 709 (sym_op->auth.data.length + sess->iv.length + 710 sess->digest_length); 711 712 /* Configure Input SGE for Encap/Decap */ 713 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 714 sge->length = sess->iv.length; 715 sge++; 716 717 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 718 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 719 sym_op->m_src->data_off); 720 sge->length = sym_op->auth.data.length; 721 if (sess->dir == DIR_DEC) { 722 sge++; 723 old_icv = (uint8_t *)(sge + 1); 724 memcpy(old_icv, sym_op->auth.digest.data, 725 sess->digest_length); 726 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 727 sge->length = sess->digest_length; 728 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 729 sess->digest_length + 730 sess->iv.length)); 731 } 732 DPAA2_SET_FLE_FIN(sge); 733 if (auth_only_len) { 734 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 735 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 736 } 737 return 0; 738 } 739 740 static inline int build_auth_sg_fd( 741 dpaa2_sec_session *sess, 742 struct rte_crypto_op *op, 743 struct qbman_fd *fd, 744 __rte_unused uint16_t bpid) 745 { 746 struct rte_crypto_sym_op *sym_op = op->sym; 747 struct qbman_fle *fle, *sge, *ip_fle, *op_fle; 748 struct sec_flow_context *flc; 749 struct ctxt_priv *priv = sess->ctxt; 750 uint8_t *old_digest; 751 struct rte_mbuf *mbuf; 752 753 PMD_INIT_FUNC_TRACE(); 754 755 mbuf = sym_op->m_src; 756 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 757 RTE_CACHE_LINE_SIZE); 758 if (unlikely(!fle)) { 759 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); 760 return -1; 761 } 762 memset(fle, 0, FLE_SG_MEM_SIZE); 763 /* first FLE entry used to store mbuf and session ctxt */ 764 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 765 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 766 op_fle = fle + 1; 767 ip_fle = fle + 2; 768 sge = fle + 3; 769 770 flc = &priv->flc_desc[DESC_INITFINAL].flc; 771 /* sg FD */ 772 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 773 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 774 DPAA2_SET_FD_COMPOUND_FMT(fd); 775 776 /* o/p fle */ 777 DPAA2_SET_FLE_ADDR(op_fle, 778 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 779 op_fle->length = sess->digest_length; 780 781 /* i/p fle */ 782 DPAA2_SET_FLE_SG_EXT(ip_fle); 783 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 784 /* i/p 1st seg */ 785 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 786 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off); 787 sge->length = mbuf->data_len - sym_op->auth.data.offset; 788 789 /* i/p segs */ 790 mbuf = mbuf->next; 791 while (mbuf) { 792 sge++; 793 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 794 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 795 sge->length = mbuf->data_len; 796 mbuf = mbuf->next; 797 } 798 if (sess->dir == DIR_ENC) { 799 /* Digest calculation case */ 800 sge->length -= sess->digest_length; 801 ip_fle->length = sym_op->auth.data.length; 802 } else { 803 /* Digest verification case */ 804 sge++; 805 old_digest = (uint8_t *)(sge + 1); 806 rte_memcpy(old_digest, sym_op->auth.digest.data, 807 sess->digest_length); 808 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 809 sge->length = sess->digest_length; 810 ip_fle->length = sym_op->auth.data.length + 811 sess->digest_length; 812 } 813 DPAA2_SET_FLE_FIN(sge); 814 DPAA2_SET_FLE_FIN(ip_fle); 815 DPAA2_SET_FD_LEN(fd, ip_fle->length); 816 817 return 0; 818 } 819 820 static inline int 821 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 822 struct qbman_fd *fd, uint16_t bpid) 823 { 824 struct rte_crypto_sym_op *sym_op = op->sym; 825 struct qbman_fle *fle, *sge; 826 struct sec_flow_context *flc; 827 struct ctxt_priv *priv = sess->ctxt; 828 uint8_t *old_digest; 829 int retval; 830 831 PMD_INIT_FUNC_TRACE(); 832 833 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 834 if (retval) { 835 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); 836 return -1; 837 } 838 memset(fle, 0, FLE_POOL_BUF_SIZE); 839 /* TODO we are using the first FLE entry to store Mbuf. 840 * Currently we donot know which FLE has the mbuf stored. 841 * So while retreiving we can go back 1 FLE from the FD -ADDR 842 * to get the MBUF Addr from the previous FLE. 843 * We can have a better approach to use the inline Mbuf 844 */ 845 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 846 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 847 fle = fle + 1; 848 849 if (likely(bpid < MAX_BPID)) { 850 DPAA2_SET_FD_BPID(fd, bpid); 851 DPAA2_SET_FLE_BPID(fle, bpid); 852 DPAA2_SET_FLE_BPID(fle + 1, bpid); 853 } else { 854 DPAA2_SET_FD_IVP(fd); 855 DPAA2_SET_FLE_IVP(fle); 856 DPAA2_SET_FLE_IVP((fle + 1)); 857 } 858 flc = &priv->flc_desc[DESC_INITFINAL].flc; 859 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 860 861 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 862 fle->length = sess->digest_length; 863 864 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 865 DPAA2_SET_FD_COMPOUND_FMT(fd); 866 fle++; 867 868 if (sess->dir == DIR_ENC) { 869 DPAA2_SET_FLE_ADDR(fle, 870 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 871 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset + 872 sym_op->m_src->data_off); 873 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length); 874 fle->length = sym_op->auth.data.length; 875 } else { 876 sge = fle + 2; 877 DPAA2_SET_FLE_SG_EXT(fle); 878 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 879 880 if (likely(bpid < MAX_BPID)) { 881 DPAA2_SET_FLE_BPID(sge, bpid); 882 DPAA2_SET_FLE_BPID(sge + 1, bpid); 883 } else { 884 DPAA2_SET_FLE_IVP(sge); 885 DPAA2_SET_FLE_IVP((sge + 1)); 886 } 887 DPAA2_SET_FLE_ADDR(sge, 888 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 889 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 890 sym_op->m_src->data_off); 891 892 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length + 893 sess->digest_length); 894 sge->length = sym_op->auth.data.length; 895 sge++; 896 old_digest = (uint8_t *)(sge + 1); 897 rte_memcpy(old_digest, sym_op->auth.digest.data, 898 sess->digest_length); 899 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 900 sge->length = sess->digest_length; 901 fle->length = sym_op->auth.data.length + 902 sess->digest_length; 903 DPAA2_SET_FLE_FIN(sge); 904 } 905 DPAA2_SET_FLE_FIN(fle); 906 907 return 0; 908 } 909 910 static int 911 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 912 struct qbman_fd *fd, __rte_unused uint16_t bpid) 913 { 914 struct rte_crypto_sym_op *sym_op = op->sym; 915 struct qbman_fle *ip_fle, *op_fle, *sge, *fle; 916 struct sec_flow_context *flc; 917 struct ctxt_priv *priv = sess->ctxt; 918 struct rte_mbuf *mbuf; 919 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 920 sess->iv.offset); 921 922 PMD_INIT_FUNC_TRACE(); 923 924 if (sym_op->m_dst) 925 mbuf = sym_op->m_dst; 926 else 927 mbuf = sym_op->m_src; 928 929 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, 930 RTE_CACHE_LINE_SIZE); 931 if (!fle) { 932 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); 933 return -1; 934 } 935 memset(fle, 0, FLE_SG_MEM_SIZE); 936 /* first FLE entry used to store mbuf and session ctxt */ 937 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 938 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 939 940 op_fle = fle + 1; 941 ip_fle = fle + 2; 942 sge = fle + 3; 943 944 flc = &priv->flc_desc[0].flc; 945 946 DPAA2_SEC_DP_DEBUG( 947 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" 948 " data_off: 0x%x\n", 949 sym_op->cipher.data.offset, 950 sym_op->cipher.data.length, 951 sess->iv.length, 952 sym_op->m_src->data_off); 953 954 /* o/p fle */ 955 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); 956 op_fle->length = sym_op->cipher.data.length; 957 DPAA2_SET_FLE_SG_EXT(op_fle); 958 959 /* o/p 1st seg */ 960 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 961 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off); 962 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 963 964 mbuf = mbuf->next; 965 /* o/p segs */ 966 while (mbuf) { 967 sge++; 968 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 969 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 970 sge->length = mbuf->data_len; 971 mbuf = mbuf->next; 972 } 973 DPAA2_SET_FLE_FIN(sge); 974 975 DPAA2_SEC_DP_DEBUG( 976 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", 977 flc, fle, fle->addr_hi, fle->addr_lo, 978 fle->length); 979 980 /* i/p fle */ 981 mbuf = sym_op->m_src; 982 sge++; 983 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge)); 984 ip_fle->length = sess->iv.length + sym_op->cipher.data.length; 985 DPAA2_SET_FLE_SG_EXT(ip_fle); 986 987 /* i/p IV */ 988 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 989 DPAA2_SET_FLE_OFFSET(sge, 0); 990 sge->length = sess->iv.length; 991 992 sge++; 993 994 /* i/p 1st seg */ 995 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 996 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 997 mbuf->data_off); 998 sge->length = mbuf->data_len - sym_op->cipher.data.offset; 999 1000 mbuf = mbuf->next; 1001 /* i/p segs */ 1002 while (mbuf) { 1003 sge++; 1004 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf)); 1005 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off); 1006 sge->length = mbuf->data_len; 1007 mbuf = mbuf->next; 1008 } 1009 DPAA2_SET_FLE_FIN(sge); 1010 DPAA2_SET_FLE_FIN(ip_fle); 1011 1012 /* sg fd */ 1013 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle)); 1014 DPAA2_SET_FD_LEN(fd, ip_fle->length); 1015 DPAA2_SET_FD_COMPOUND_FMT(fd); 1016 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1017 1018 DPAA2_SEC_DP_DEBUG( 1019 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1020 " off =%d, len =%d\n", 1021 DPAA2_GET_FD_ADDR(fd), 1022 DPAA2_GET_FD_BPID(fd), 1023 rte_dpaa2_bpid_info[bpid].meta_data_size, 1024 DPAA2_GET_FD_OFFSET(fd), 1025 DPAA2_GET_FD_LEN(fd)); 1026 return 0; 1027 } 1028 1029 static int 1030 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 1031 struct qbman_fd *fd, uint16_t bpid) 1032 { 1033 struct rte_crypto_sym_op *sym_op = op->sym; 1034 struct qbman_fle *fle, *sge; 1035 int retval; 1036 struct sec_flow_context *flc; 1037 struct ctxt_priv *priv = sess->ctxt; 1038 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1039 sess->iv.offset); 1040 struct rte_mbuf *dst; 1041 1042 PMD_INIT_FUNC_TRACE(); 1043 1044 if (sym_op->m_dst) 1045 dst = sym_op->m_dst; 1046 else 1047 dst = sym_op->m_src; 1048 1049 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 1050 if (retval) { 1051 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); 1052 return -1; 1053 } 1054 memset(fle, 0, FLE_POOL_BUF_SIZE); 1055 /* TODO we are using the first FLE entry to store Mbuf. 1056 * Currently we donot know which FLE has the mbuf stored. 1057 * So while retreiving we can go back 1 FLE from the FD -ADDR 1058 * to get the MBUF Addr from the previous FLE. 1059 * We can have a better approach to use the inline Mbuf 1060 */ 1061 DPAA2_SET_FLE_ADDR(fle, (size_t)op); 1062 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); 1063 fle = fle + 1; 1064 sge = fle + 2; 1065 1066 if (likely(bpid < MAX_BPID)) { 1067 DPAA2_SET_FD_BPID(fd, bpid); 1068 DPAA2_SET_FLE_BPID(fle, bpid); 1069 DPAA2_SET_FLE_BPID(fle + 1, bpid); 1070 DPAA2_SET_FLE_BPID(sge, bpid); 1071 DPAA2_SET_FLE_BPID(sge + 1, bpid); 1072 } else { 1073 DPAA2_SET_FD_IVP(fd); 1074 DPAA2_SET_FLE_IVP(fle); 1075 DPAA2_SET_FLE_IVP((fle + 1)); 1076 DPAA2_SET_FLE_IVP(sge); 1077 DPAA2_SET_FLE_IVP((sge + 1)); 1078 } 1079 1080 flc = &priv->flc_desc[0].flc; 1081 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 1082 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length + 1083 sess->iv.length); 1084 DPAA2_SET_FD_COMPOUND_FMT(fd); 1085 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 1086 1087 DPAA2_SEC_DP_DEBUG( 1088 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," 1089 " data_off: 0x%x\n", 1090 sym_op->cipher.data.offset, 1091 sym_op->cipher.data.length, 1092 sess->iv.length, 1093 sym_op->m_src->data_off); 1094 1095 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 1096 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + 1097 dst->data_off); 1098 1099 fle->length = sym_op->cipher.data.length + sess->iv.length; 1100 1101 DPAA2_SEC_DP_DEBUG( 1102 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", 1103 flc, fle, fle->addr_hi, fle->addr_lo, 1104 fle->length); 1105 1106 fle++; 1107 1108 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 1109 fle->length = sym_op->cipher.data.length + sess->iv.length; 1110 1111 DPAA2_SET_FLE_SG_EXT(fle); 1112 1113 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 1114 sge->length = sess->iv.length; 1115 1116 sge++; 1117 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 1118 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 1119 sym_op->m_src->data_off); 1120 1121 sge->length = sym_op->cipher.data.length; 1122 DPAA2_SET_FLE_FIN(sge); 1123 DPAA2_SET_FLE_FIN(fle); 1124 1125 DPAA2_SEC_DP_DEBUG( 1126 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" 1127 " off =%d, len =%d\n", 1128 DPAA2_GET_FD_ADDR(fd), 1129 DPAA2_GET_FD_BPID(fd), 1130 rte_dpaa2_bpid_info[bpid].meta_data_size, 1131 DPAA2_GET_FD_OFFSET(fd), 1132 DPAA2_GET_FD_LEN(fd)); 1133 1134 return 0; 1135 } 1136 1137 static inline int 1138 build_sec_fd(struct rte_crypto_op *op, 1139 struct qbman_fd *fd, uint16_t bpid) 1140 { 1141 int ret = -1; 1142 dpaa2_sec_session *sess; 1143 1144 PMD_INIT_FUNC_TRACE(); 1145 1146 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 1147 sess = (dpaa2_sec_session *)get_sym_session_private_data( 1148 op->sym->session, cryptodev_driver_id); 1149 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 1150 sess = (dpaa2_sec_session *)get_sec_session_private_data( 1151 op->sym->sec_session); 1152 else 1153 return -1; 1154 1155 /* Segmented buffer */ 1156 if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) { 1157 switch (sess->ctxt_type) { 1158 case DPAA2_SEC_CIPHER: 1159 ret = build_cipher_sg_fd(sess, op, fd, bpid); 1160 break; 1161 case DPAA2_SEC_AUTH: 1162 ret = build_auth_sg_fd(sess, op, fd, bpid); 1163 break; 1164 case DPAA2_SEC_AEAD: 1165 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid); 1166 break; 1167 case DPAA2_SEC_CIPHER_HASH: 1168 ret = build_authenc_sg_fd(sess, op, fd, bpid); 1169 break; 1170 case DPAA2_SEC_HASH_CIPHER: 1171 default: 1172 DPAA2_SEC_ERR("error: Unsupported session"); 1173 } 1174 } else { 1175 switch (sess->ctxt_type) { 1176 case DPAA2_SEC_CIPHER: 1177 ret = build_cipher_fd(sess, op, fd, bpid); 1178 break; 1179 case DPAA2_SEC_AUTH: 1180 ret = build_auth_fd(sess, op, fd, bpid); 1181 break; 1182 case DPAA2_SEC_AEAD: 1183 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 1184 break; 1185 case DPAA2_SEC_CIPHER_HASH: 1186 ret = build_authenc_fd(sess, op, fd, bpid); 1187 break; 1188 case DPAA2_SEC_IPSEC: 1189 ret = build_proto_fd(sess, op, fd, bpid); 1190 break; 1191 case DPAA2_SEC_HASH_CIPHER: 1192 default: 1193 DPAA2_SEC_ERR("error: Unsupported session"); 1194 } 1195 } 1196 return ret; 1197 } 1198 1199 static uint16_t 1200 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1201 uint16_t nb_ops) 1202 { 1203 /* Function to transmit the frames to given device and VQ*/ 1204 uint32_t loop; 1205 int32_t ret; 1206 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1207 uint32_t frames_to_send; 1208 struct qbman_eq_desc eqdesc; 1209 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1210 struct qbman_swp *swp; 1211 uint16_t num_tx = 0; 1212 /*todo - need to support multiple buffer pools */ 1213 uint16_t bpid; 1214 struct rte_mempool *mb_pool; 1215 1216 if (unlikely(nb_ops == 0)) 1217 return 0; 1218 1219 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 1220 DPAA2_SEC_ERR("sessionless crypto op not supported"); 1221 return 0; 1222 } 1223 /*Prepare enqueue descriptor*/ 1224 qbman_eq_desc_clear(&eqdesc); 1225 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1226 qbman_eq_desc_set_response(&eqdesc, 0, 0); 1227 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 1228 1229 if (!DPAA2_PER_LCORE_DPIO) { 1230 ret = dpaa2_affine_qbman_swp(); 1231 if (ret) { 1232 DPAA2_SEC_ERR("Failure in affining portal"); 1233 return 0; 1234 } 1235 } 1236 swp = DPAA2_PER_LCORE_PORTAL; 1237 1238 while (nb_ops) { 1239 frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops; 1240 1241 for (loop = 0; loop < frames_to_send; loop++) { 1242 /*Clear the unused FD fields before sending*/ 1243 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 1244 mb_pool = (*ops)->sym->m_src->pool; 1245 bpid = mempool_to_bpid(mb_pool); 1246 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 1247 if (ret) { 1248 DPAA2_SEC_ERR("error: Improper packet contents" 1249 " for crypto operation"); 1250 goto skip_tx; 1251 } 1252 ops++; 1253 } 1254 loop = 0; 1255 while (loop < frames_to_send) { 1256 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 1257 &fd_arr[loop], 1258 NULL, 1259 frames_to_send - loop); 1260 } 1261 1262 num_tx += frames_to_send; 1263 nb_ops -= frames_to_send; 1264 } 1265 skip_tx: 1266 dpaa2_qp->tx_vq.tx_pkts += num_tx; 1267 dpaa2_qp->tx_vq.err_pkts += nb_ops; 1268 return num_tx; 1269 } 1270 1271 static inline struct rte_crypto_op * 1272 sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id) 1273 { 1274 struct rte_crypto_op *op; 1275 uint16_t len = DPAA2_GET_FD_LEN(fd); 1276 uint16_t diff = 0; 1277 dpaa2_sec_session *sess_priv; 1278 1279 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 1280 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 1281 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 1282 1283 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; 1284 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 1285 op->sym->aead.digest.phys_addr = 0L; 1286 1287 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 1288 op->sym->sec_session); 1289 if (sess_priv->dir == DIR_ENC) 1290 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 1291 else 1292 mbuf->data_off += SEC_FLC_DHR_INBOUND; 1293 diff = len - mbuf->pkt_len; 1294 mbuf->pkt_len += diff; 1295 mbuf->data_len += diff; 1296 1297 return op; 1298 } 1299 1300 static inline struct rte_crypto_op * 1301 sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) 1302 { 1303 struct qbman_fle *fle; 1304 struct rte_crypto_op *op; 1305 struct ctxt_priv *priv; 1306 struct rte_mbuf *dst, *src; 1307 1308 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 1309 return sec_simple_fd_to_mbuf(fd, driver_id); 1310 1311 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 1312 1313 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", 1314 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 1315 1316 /* we are using the first FLE entry to store Mbuf. 1317 * Currently we donot know which FLE has the mbuf stored. 1318 * So while retreiving we can go back 1 FLE from the FD -ADDR 1319 * to get the MBUF Addr from the previous FLE. 1320 * We can have a better approach to use the inline Mbuf 1321 */ 1322 1323 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 1324 /* TODO complete it. */ 1325 DPAA2_SEC_ERR("error: non inline buffer"); 1326 return NULL; 1327 } 1328 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); 1329 1330 /* Prefeth op */ 1331 src = op->sym->m_src; 1332 rte_prefetch0(src); 1333 1334 if (op->sym->m_dst) { 1335 dst = op->sym->m_dst; 1336 rte_prefetch0(dst); 1337 } else 1338 dst = src; 1339 1340 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 1341 dpaa2_sec_session *sess = (dpaa2_sec_session *) 1342 get_sec_session_private_data(op->sym->sec_session); 1343 if (sess->ctxt_type == DPAA2_SEC_IPSEC) { 1344 uint16_t len = DPAA2_GET_FD_LEN(fd); 1345 dst->pkt_len = len; 1346 dst->data_len = len; 1347 } 1348 } 1349 1350 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," 1351 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", 1352 (void *)dst, 1353 dst->buf_addr, 1354 DPAA2_GET_FD_ADDR(fd), 1355 DPAA2_GET_FD_BPID(fd), 1356 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 1357 DPAA2_GET_FD_OFFSET(fd), 1358 DPAA2_GET_FD_LEN(fd)); 1359 1360 /* free the fle memory */ 1361 if (likely(rte_pktmbuf_is_contiguous(src))) { 1362 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); 1363 rte_mempool_put(priv->fle_pool, (void *)(fle-1)); 1364 } else 1365 rte_free((void *)(fle-1)); 1366 1367 return op; 1368 } 1369 1370 static uint16_t 1371 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 1372 uint16_t nb_ops) 1373 { 1374 /* Function is responsible to receive frames for a given device and VQ*/ 1375 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 1376 struct rte_cryptodev *dev = 1377 (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev); 1378 struct qbman_result *dq_storage; 1379 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 1380 int ret, num_rx = 0; 1381 uint8_t is_last = 0, status; 1382 struct qbman_swp *swp; 1383 const struct qbman_fd *fd; 1384 struct qbman_pull_desc pulldesc; 1385 1386 if (!DPAA2_PER_LCORE_DPIO) { 1387 ret = dpaa2_affine_qbman_swp(); 1388 if (ret) { 1389 DPAA2_SEC_ERR("Failure in affining portal"); 1390 return 0; 1391 } 1392 } 1393 swp = DPAA2_PER_LCORE_PORTAL; 1394 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 1395 1396 qbman_pull_desc_clear(&pulldesc); 1397 qbman_pull_desc_set_numframes(&pulldesc, 1398 (nb_ops > DPAA2_DQRR_RING_SIZE) ? 1399 DPAA2_DQRR_RING_SIZE : nb_ops); 1400 qbman_pull_desc_set_fq(&pulldesc, fqid); 1401 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1402 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 1403 1); 1404 1405 /*Issue a volatile dequeue command. */ 1406 while (1) { 1407 if (qbman_swp_pull(swp, &pulldesc)) { 1408 DPAA2_SEC_WARN( 1409 "SEC VDQ command is not issued : QBMAN busy"); 1410 /* Portal was busy, try again */ 1411 continue; 1412 } 1413 break; 1414 }; 1415 1416 /* Receive the packets till Last Dequeue entry is found with 1417 * respect to the above issues PULL command. 1418 */ 1419 while (!is_last) { 1420 /* Check if the previous issued command is completed. 1421 * Also seems like the SWP is shared between the Ethernet Driver 1422 * and the SEC driver. 1423 */ 1424 while (!qbman_check_command_complete(dq_storage)) 1425 ; 1426 1427 /* Loop until the dq_storage is updated with 1428 * new token by QBMAN 1429 */ 1430 while (!qbman_check_new_result(dq_storage)) 1431 ; 1432 /* Check whether Last Pull command is Expired and 1433 * setting Condition for Loop termination 1434 */ 1435 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 1436 is_last = 1; 1437 /* Check for valid frame. */ 1438 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 1439 if (unlikely( 1440 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 1441 DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); 1442 continue; 1443 } 1444 } 1445 1446 fd = qbman_result_DQ_fd(dq_storage); 1447 ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id); 1448 1449 if (unlikely(fd->simple.frc)) { 1450 /* TODO Parse SEC errors */ 1451 DPAA2_SEC_ERR("SEC returned Error - %x", 1452 fd->simple.frc); 1453 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 1454 } else { 1455 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 1456 } 1457 1458 num_rx++; 1459 dq_storage++; 1460 } /* End of Packet Rx loop */ 1461 1462 dpaa2_qp->rx_vq.rx_pkts += num_rx; 1463 1464 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); 1465 /*Return the total number of packets received to DPAA2 app*/ 1466 return num_rx; 1467 } 1468 1469 /** Release queue pair */ 1470 static int 1471 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 1472 { 1473 struct dpaa2_sec_qp *qp = 1474 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 1475 1476 PMD_INIT_FUNC_TRACE(); 1477 1478 if (qp->rx_vq.q_storage) { 1479 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 1480 rte_free(qp->rx_vq.q_storage); 1481 } 1482 rte_free(qp); 1483 1484 dev->data->queue_pairs[queue_pair_id] = NULL; 1485 1486 return 0; 1487 } 1488 1489 /** Setup a queue pair */ 1490 static int 1491 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 1492 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1493 __rte_unused int socket_id, 1494 __rte_unused struct rte_mempool *session_pool) 1495 { 1496 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1497 struct dpaa2_sec_qp *qp; 1498 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1499 struct dpseci_rx_queue_cfg cfg; 1500 int32_t retcode; 1501 1502 PMD_INIT_FUNC_TRACE(); 1503 1504 /* If qp is already in use free ring memory and qp metadata. */ 1505 if (dev->data->queue_pairs[qp_id] != NULL) { 1506 DPAA2_SEC_INFO("QP already setup"); 1507 return 0; 1508 } 1509 1510 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", 1511 dev, qp_id, qp_conf); 1512 1513 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 1514 1515 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 1516 RTE_CACHE_LINE_SIZE); 1517 if (!qp) { 1518 DPAA2_SEC_ERR("malloc failed for rx/tx queues"); 1519 return -1; 1520 } 1521 1522 qp->rx_vq.dev = dev; 1523 qp->tx_vq.dev = dev; 1524 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 1525 sizeof(struct queue_storage_info_t), 1526 RTE_CACHE_LINE_SIZE); 1527 if (!qp->rx_vq.q_storage) { 1528 DPAA2_SEC_ERR("malloc failed for q_storage"); 1529 return -1; 1530 } 1531 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 1532 1533 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 1534 DPAA2_SEC_ERR("Unable to allocate dequeue storage"); 1535 return -1; 1536 } 1537 1538 dev->data->queue_pairs[qp_id] = qp; 1539 1540 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 1541 cfg.user_ctx = (size_t)(&qp->rx_vq); 1542 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 1543 qp_id, &cfg); 1544 return retcode; 1545 } 1546 1547 /** Return the number of allocated queue pairs */ 1548 static uint32_t 1549 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) 1550 { 1551 PMD_INIT_FUNC_TRACE(); 1552 1553 return dev->data->nb_queue_pairs; 1554 } 1555 1556 /** Returns the size of the aesni gcm session structure */ 1557 static unsigned int 1558 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1559 { 1560 PMD_INIT_FUNC_TRACE(); 1561 1562 return sizeof(dpaa2_sec_session); 1563 } 1564 1565 static int 1566 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 1567 struct rte_crypto_sym_xform *xform, 1568 dpaa2_sec_session *session) 1569 { 1570 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1571 struct alginfo cipherdata; 1572 int bufsize, i; 1573 struct ctxt_priv *priv; 1574 struct sec_flow_context *flc; 1575 1576 PMD_INIT_FUNC_TRACE(); 1577 1578 /* For SEC CIPHER only one descriptor is required. */ 1579 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1580 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1581 RTE_CACHE_LINE_SIZE); 1582 if (priv == NULL) { 1583 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1584 return -1; 1585 } 1586 1587 priv->fle_pool = dev_priv->fle_pool; 1588 1589 flc = &priv->flc_desc[0].flc; 1590 1591 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1592 RTE_CACHE_LINE_SIZE); 1593 if (session->cipher_key.data == NULL) { 1594 DPAA2_SEC_ERR("No Memory for cipher key"); 1595 rte_free(priv); 1596 return -1; 1597 } 1598 session->cipher_key.length = xform->cipher.key.length; 1599 1600 memcpy(session->cipher_key.data, xform->cipher.key.data, 1601 xform->cipher.key.length); 1602 cipherdata.key = (size_t)session->cipher_key.data; 1603 cipherdata.keylen = session->cipher_key.length; 1604 cipherdata.key_enc_flags = 0; 1605 cipherdata.key_type = RTA_DATA_IMM; 1606 1607 /* Set IV parameters */ 1608 session->iv.offset = xform->cipher.iv.offset; 1609 session->iv.length = xform->cipher.iv.length; 1610 1611 switch (xform->cipher.algo) { 1612 case RTE_CRYPTO_CIPHER_AES_CBC: 1613 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1614 cipherdata.algmode = OP_ALG_AAI_CBC; 1615 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1616 break; 1617 case RTE_CRYPTO_CIPHER_3DES_CBC: 1618 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1619 cipherdata.algmode = OP_ALG_AAI_CBC; 1620 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1621 break; 1622 case RTE_CRYPTO_CIPHER_AES_CTR: 1623 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1624 cipherdata.algmode = OP_ALG_AAI_CTR; 1625 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1626 break; 1627 case RTE_CRYPTO_CIPHER_3DES_CTR: 1628 case RTE_CRYPTO_CIPHER_AES_ECB: 1629 case RTE_CRYPTO_CIPHER_3DES_ECB: 1630 case RTE_CRYPTO_CIPHER_AES_XTS: 1631 case RTE_CRYPTO_CIPHER_AES_F8: 1632 case RTE_CRYPTO_CIPHER_ARC4: 1633 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1634 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1635 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1636 case RTE_CRYPTO_CIPHER_NULL: 1637 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 1638 xform->cipher.algo); 1639 goto error_out; 1640 default: 1641 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 1642 xform->cipher.algo); 1643 goto error_out; 1644 } 1645 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1646 DIR_ENC : DIR_DEC; 1647 1648 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1649 &cipherdata, NULL, session->iv.length, 1650 session->dir); 1651 if (bufsize < 0) { 1652 DPAA2_SEC_ERR("Crypto: Descriptor build failed"); 1653 goto error_out; 1654 } 1655 flc->dhr = 0; 1656 flc->bpv0 = 0x1; 1657 flc->mode_bits = 0x8000; 1658 1659 flc->word1_sdl = (uint8_t)bufsize; 1660 flc->word2_rflc_31_0 = lower_32_bits( 1661 (size_t)&(((struct dpaa2_sec_qp *) 1662 dev->data->queue_pairs[0])->rx_vq)); 1663 flc->word3_rflc_63_32 = upper_32_bits( 1664 (size_t)&(((struct dpaa2_sec_qp *) 1665 dev->data->queue_pairs[0])->rx_vq)); 1666 session->ctxt = priv; 1667 1668 for (i = 0; i < bufsize; i++) 1669 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); 1670 1671 return 0; 1672 1673 error_out: 1674 rte_free(session->cipher_key.data); 1675 rte_free(priv); 1676 return -1; 1677 } 1678 1679 static int 1680 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1681 struct rte_crypto_sym_xform *xform, 1682 dpaa2_sec_session *session) 1683 { 1684 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1685 struct alginfo authdata; 1686 int bufsize, i; 1687 struct ctxt_priv *priv; 1688 struct sec_flow_context *flc; 1689 1690 PMD_INIT_FUNC_TRACE(); 1691 1692 /* For SEC AUTH three descriptors are required for various stages */ 1693 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1694 sizeof(struct ctxt_priv) + 3 * 1695 sizeof(struct sec_flc_desc), 1696 RTE_CACHE_LINE_SIZE); 1697 if (priv == NULL) { 1698 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1699 return -1; 1700 } 1701 1702 priv->fle_pool = dev_priv->fle_pool; 1703 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1704 1705 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1706 RTE_CACHE_LINE_SIZE); 1707 if (session->auth_key.data == NULL) { 1708 DPAA2_SEC_ERR("Unable to allocate memory for auth key"); 1709 rte_free(priv); 1710 return -1; 1711 } 1712 session->auth_key.length = xform->auth.key.length; 1713 1714 memcpy(session->auth_key.data, xform->auth.key.data, 1715 xform->auth.key.length); 1716 authdata.key = (size_t)session->auth_key.data; 1717 authdata.keylen = session->auth_key.length; 1718 authdata.key_enc_flags = 0; 1719 authdata.key_type = RTA_DATA_IMM; 1720 1721 session->digest_length = xform->auth.digest_length; 1722 1723 switch (xform->auth.algo) { 1724 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1725 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1726 authdata.algmode = OP_ALG_AAI_HMAC; 1727 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1728 break; 1729 case RTE_CRYPTO_AUTH_MD5_HMAC: 1730 authdata.algtype = OP_ALG_ALGSEL_MD5; 1731 authdata.algmode = OP_ALG_AAI_HMAC; 1732 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1733 break; 1734 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1735 authdata.algtype = OP_ALG_ALGSEL_SHA256; 1736 authdata.algmode = OP_ALG_AAI_HMAC; 1737 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1738 break; 1739 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1740 authdata.algtype = OP_ALG_ALGSEL_SHA384; 1741 authdata.algmode = OP_ALG_AAI_HMAC; 1742 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1743 break; 1744 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1745 authdata.algtype = OP_ALG_ALGSEL_SHA512; 1746 authdata.algmode = OP_ALG_AAI_HMAC; 1747 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1748 break; 1749 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1750 authdata.algtype = OP_ALG_ALGSEL_SHA224; 1751 authdata.algmode = OP_ALG_AAI_HMAC; 1752 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 1753 break; 1754 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1755 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1756 case RTE_CRYPTO_AUTH_NULL: 1757 case RTE_CRYPTO_AUTH_SHA1: 1758 case RTE_CRYPTO_AUTH_SHA256: 1759 case RTE_CRYPTO_AUTH_SHA512: 1760 case RTE_CRYPTO_AUTH_SHA224: 1761 case RTE_CRYPTO_AUTH_SHA384: 1762 case RTE_CRYPTO_AUTH_MD5: 1763 case RTE_CRYPTO_AUTH_AES_GMAC: 1764 case RTE_CRYPTO_AUTH_KASUMI_F9: 1765 case RTE_CRYPTO_AUTH_AES_CMAC: 1766 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1767 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1768 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", 1769 xform->auth.algo); 1770 goto error_out; 1771 default: 1772 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 1773 xform->auth.algo); 1774 goto error_out; 1775 } 1776 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1777 DIR_ENC : DIR_DEC; 1778 1779 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1780 1, 0, &authdata, !session->dir, 1781 session->digest_length); 1782 if (bufsize < 0) { 1783 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1784 goto error_out; 1785 } 1786 1787 flc->word1_sdl = (uint8_t)bufsize; 1788 flc->word2_rflc_31_0 = lower_32_bits( 1789 (size_t)&(((struct dpaa2_sec_qp *) 1790 dev->data->queue_pairs[0])->rx_vq)); 1791 flc->word3_rflc_63_32 = upper_32_bits( 1792 (size_t)&(((struct dpaa2_sec_qp *) 1793 dev->data->queue_pairs[0])->rx_vq)); 1794 session->ctxt = priv; 1795 for (i = 0; i < bufsize; i++) 1796 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 1797 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 1798 1799 1800 return 0; 1801 1802 error_out: 1803 rte_free(session->auth_key.data); 1804 rte_free(priv); 1805 return -1; 1806 } 1807 1808 static int 1809 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 1810 struct rte_crypto_sym_xform *xform, 1811 dpaa2_sec_session *session) 1812 { 1813 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1814 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1815 struct alginfo aeaddata; 1816 int bufsize, i; 1817 struct ctxt_priv *priv; 1818 struct sec_flow_context *flc; 1819 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 1820 int err; 1821 1822 PMD_INIT_FUNC_TRACE(); 1823 1824 /* Set IV parameters */ 1825 session->iv.offset = aead_xform->iv.offset; 1826 session->iv.length = aead_xform->iv.length; 1827 session->ctxt_type = DPAA2_SEC_AEAD; 1828 1829 /* For SEC AEAD only one descriptor is required */ 1830 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1831 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1832 RTE_CACHE_LINE_SIZE); 1833 if (priv == NULL) { 1834 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1835 return -1; 1836 } 1837 1838 priv->fle_pool = dev_priv->fle_pool; 1839 flc = &priv->flc_desc[0].flc; 1840 1841 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 1842 RTE_CACHE_LINE_SIZE); 1843 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 1844 DPAA2_SEC_ERR("No Memory for aead key"); 1845 rte_free(priv); 1846 return -1; 1847 } 1848 memcpy(session->aead_key.data, aead_xform->key.data, 1849 aead_xform->key.length); 1850 1851 session->digest_length = aead_xform->digest_length; 1852 session->aead_key.length = aead_xform->key.length; 1853 ctxt->auth_only_len = aead_xform->aad_length; 1854 1855 aeaddata.key = (size_t)session->aead_key.data; 1856 aeaddata.keylen = session->aead_key.length; 1857 aeaddata.key_enc_flags = 0; 1858 aeaddata.key_type = RTA_DATA_IMM; 1859 1860 switch (aead_xform->algo) { 1861 case RTE_CRYPTO_AEAD_AES_GCM: 1862 aeaddata.algtype = OP_ALG_ALGSEL_AES; 1863 aeaddata.algmode = OP_ALG_AAI_GCM; 1864 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 1865 break; 1866 case RTE_CRYPTO_AEAD_AES_CCM: 1867 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", 1868 aead_xform->algo); 1869 goto error_out; 1870 default: 1871 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 1872 aead_xform->algo); 1873 goto error_out; 1874 } 1875 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1876 DIR_ENC : DIR_DEC; 1877 1878 priv->flc_desc[0].desc[0] = aeaddata.keylen; 1879 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1880 MIN_JOB_DESC_SIZE, 1881 (unsigned int *)priv->flc_desc[0].desc, 1882 &priv->flc_desc[0].desc[1], 1); 1883 1884 if (err < 0) { 1885 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 1886 goto error_out; 1887 } 1888 if (priv->flc_desc[0].desc[1] & 1) { 1889 aeaddata.key_type = RTA_DATA_IMM; 1890 } else { 1891 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 1892 aeaddata.key_type = RTA_DATA_PTR; 1893 } 1894 priv->flc_desc[0].desc[0] = 0; 1895 priv->flc_desc[0].desc[1] = 0; 1896 1897 if (session->dir == DIR_ENC) 1898 bufsize = cnstr_shdsc_gcm_encap( 1899 priv->flc_desc[0].desc, 1, 0, 1900 &aeaddata, session->iv.length, 1901 session->digest_length); 1902 else 1903 bufsize = cnstr_shdsc_gcm_decap( 1904 priv->flc_desc[0].desc, 1, 0, 1905 &aeaddata, session->iv.length, 1906 session->digest_length); 1907 if (bufsize < 0) { 1908 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 1909 goto error_out; 1910 } 1911 1912 flc->word1_sdl = (uint8_t)bufsize; 1913 flc->word2_rflc_31_0 = lower_32_bits( 1914 (size_t)&(((struct dpaa2_sec_qp *) 1915 dev->data->queue_pairs[0])->rx_vq)); 1916 flc->word3_rflc_63_32 = upper_32_bits( 1917 (size_t)&(((struct dpaa2_sec_qp *) 1918 dev->data->queue_pairs[0])->rx_vq)); 1919 session->ctxt = priv; 1920 for (i = 0; i < bufsize; i++) 1921 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", 1922 i, priv->flc_desc[0].desc[i]); 1923 1924 return 0; 1925 1926 error_out: 1927 rte_free(session->aead_key.data); 1928 rte_free(priv); 1929 return -1; 1930 } 1931 1932 1933 static int 1934 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 1935 struct rte_crypto_sym_xform *xform, 1936 dpaa2_sec_session *session) 1937 { 1938 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1939 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1940 struct alginfo authdata, cipherdata; 1941 int bufsize, i; 1942 struct ctxt_priv *priv; 1943 struct sec_flow_context *flc; 1944 struct rte_crypto_cipher_xform *cipher_xform; 1945 struct rte_crypto_auth_xform *auth_xform; 1946 int err; 1947 1948 PMD_INIT_FUNC_TRACE(); 1949 1950 if (session->ext_params.aead_ctxt.auth_cipher_text) { 1951 cipher_xform = &xform->cipher; 1952 auth_xform = &xform->next->auth; 1953 session->ctxt_type = 1954 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1955 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 1956 } else { 1957 cipher_xform = &xform->next->cipher; 1958 auth_xform = &xform->auth; 1959 session->ctxt_type = 1960 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1961 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 1962 } 1963 1964 /* Set IV parameters */ 1965 session->iv.offset = cipher_xform->iv.offset; 1966 session->iv.length = cipher_xform->iv.length; 1967 1968 /* For SEC AEAD only one descriptor is required */ 1969 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1970 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1971 RTE_CACHE_LINE_SIZE); 1972 if (priv == NULL) { 1973 DPAA2_SEC_ERR("No Memory for priv CTXT"); 1974 return -1; 1975 } 1976 1977 priv->fle_pool = dev_priv->fle_pool; 1978 flc = &priv->flc_desc[0].flc; 1979 1980 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 1981 RTE_CACHE_LINE_SIZE); 1982 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 1983 DPAA2_SEC_ERR("No Memory for cipher key"); 1984 rte_free(priv); 1985 return -1; 1986 } 1987 session->cipher_key.length = cipher_xform->key.length; 1988 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 1989 RTE_CACHE_LINE_SIZE); 1990 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 1991 DPAA2_SEC_ERR("No Memory for auth key"); 1992 rte_free(session->cipher_key.data); 1993 rte_free(priv); 1994 return -1; 1995 } 1996 session->auth_key.length = auth_xform->key.length; 1997 memcpy(session->cipher_key.data, cipher_xform->key.data, 1998 cipher_xform->key.length); 1999 memcpy(session->auth_key.data, auth_xform->key.data, 2000 auth_xform->key.length); 2001 2002 authdata.key = (size_t)session->auth_key.data; 2003 authdata.keylen = session->auth_key.length; 2004 authdata.key_enc_flags = 0; 2005 authdata.key_type = RTA_DATA_IMM; 2006 2007 session->digest_length = auth_xform->digest_length; 2008 2009 switch (auth_xform->algo) { 2010 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2011 authdata.algtype = OP_ALG_ALGSEL_SHA1; 2012 authdata.algmode = OP_ALG_AAI_HMAC; 2013 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 2014 break; 2015 case RTE_CRYPTO_AUTH_MD5_HMAC: 2016 authdata.algtype = OP_ALG_ALGSEL_MD5; 2017 authdata.algmode = OP_ALG_AAI_HMAC; 2018 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 2019 break; 2020 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2021 authdata.algtype = OP_ALG_ALGSEL_SHA224; 2022 authdata.algmode = OP_ALG_AAI_HMAC; 2023 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 2024 break; 2025 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2026 authdata.algtype = OP_ALG_ALGSEL_SHA256; 2027 authdata.algmode = OP_ALG_AAI_HMAC; 2028 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 2029 break; 2030 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2031 authdata.algtype = OP_ALG_ALGSEL_SHA384; 2032 authdata.algmode = OP_ALG_AAI_HMAC; 2033 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 2034 break; 2035 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2036 authdata.algtype = OP_ALG_ALGSEL_SHA512; 2037 authdata.algmode = OP_ALG_AAI_HMAC; 2038 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 2039 break; 2040 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2041 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2042 case RTE_CRYPTO_AUTH_NULL: 2043 case RTE_CRYPTO_AUTH_SHA1: 2044 case RTE_CRYPTO_AUTH_SHA256: 2045 case RTE_CRYPTO_AUTH_SHA512: 2046 case RTE_CRYPTO_AUTH_SHA224: 2047 case RTE_CRYPTO_AUTH_SHA384: 2048 case RTE_CRYPTO_AUTH_MD5: 2049 case RTE_CRYPTO_AUTH_AES_GMAC: 2050 case RTE_CRYPTO_AUTH_KASUMI_F9: 2051 case RTE_CRYPTO_AUTH_AES_CMAC: 2052 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2053 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2054 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2055 auth_xform->algo); 2056 goto error_out; 2057 default: 2058 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2059 auth_xform->algo); 2060 goto error_out; 2061 } 2062 cipherdata.key = (size_t)session->cipher_key.data; 2063 cipherdata.keylen = session->cipher_key.length; 2064 cipherdata.key_enc_flags = 0; 2065 cipherdata.key_type = RTA_DATA_IMM; 2066 2067 switch (cipher_xform->algo) { 2068 case RTE_CRYPTO_CIPHER_AES_CBC: 2069 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2070 cipherdata.algmode = OP_ALG_AAI_CBC; 2071 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 2072 break; 2073 case RTE_CRYPTO_CIPHER_3DES_CBC: 2074 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 2075 cipherdata.algmode = OP_ALG_AAI_CBC; 2076 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 2077 break; 2078 case RTE_CRYPTO_CIPHER_AES_CTR: 2079 cipherdata.algtype = OP_ALG_ALGSEL_AES; 2080 cipherdata.algmode = OP_ALG_AAI_CTR; 2081 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 2082 break; 2083 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2084 case RTE_CRYPTO_CIPHER_NULL: 2085 case RTE_CRYPTO_CIPHER_3DES_ECB: 2086 case RTE_CRYPTO_CIPHER_AES_ECB: 2087 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2088 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2089 cipher_xform->algo); 2090 goto error_out; 2091 default: 2092 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2093 cipher_xform->algo); 2094 goto error_out; 2095 } 2096 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 2097 DIR_ENC : DIR_DEC; 2098 2099 priv->flc_desc[0].desc[0] = cipherdata.keylen; 2100 priv->flc_desc[0].desc[1] = authdata.keylen; 2101 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 2102 MIN_JOB_DESC_SIZE, 2103 (unsigned int *)priv->flc_desc[0].desc, 2104 &priv->flc_desc[0].desc[2], 2); 2105 2106 if (err < 0) { 2107 DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); 2108 goto error_out; 2109 } 2110 if (priv->flc_desc[0].desc[2] & 1) { 2111 cipherdata.key_type = RTA_DATA_IMM; 2112 } else { 2113 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 2114 cipherdata.key_type = RTA_DATA_PTR; 2115 } 2116 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 2117 authdata.key_type = RTA_DATA_IMM; 2118 } else { 2119 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 2120 authdata.key_type = RTA_DATA_PTR; 2121 } 2122 priv->flc_desc[0].desc[0] = 0; 2123 priv->flc_desc[0].desc[1] = 0; 2124 priv->flc_desc[0].desc[2] = 0; 2125 2126 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 2127 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 2128 0, &cipherdata, &authdata, 2129 session->iv.length, 2130 ctxt->auth_only_len, 2131 session->digest_length, 2132 session->dir); 2133 if (bufsize < 0) { 2134 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2135 goto error_out; 2136 } 2137 } else { 2138 DPAA2_SEC_ERR("Hash before cipher not supported"); 2139 goto error_out; 2140 } 2141 2142 flc->word1_sdl = (uint8_t)bufsize; 2143 flc->word2_rflc_31_0 = lower_32_bits( 2144 (size_t)&(((struct dpaa2_sec_qp *) 2145 dev->data->queue_pairs[0])->rx_vq)); 2146 flc->word3_rflc_63_32 = upper_32_bits( 2147 (size_t)&(((struct dpaa2_sec_qp *) 2148 dev->data->queue_pairs[0])->rx_vq)); 2149 session->ctxt = priv; 2150 for (i = 0; i < bufsize; i++) 2151 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", 2152 i, priv->flc_desc[0].desc[i]); 2153 2154 return 0; 2155 2156 error_out: 2157 rte_free(session->cipher_key.data); 2158 rte_free(session->auth_key.data); 2159 rte_free(priv); 2160 return -1; 2161 } 2162 2163 static int 2164 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 2165 struct rte_crypto_sym_xform *xform, void *sess) 2166 { 2167 dpaa2_sec_session *session = sess; 2168 2169 PMD_INIT_FUNC_TRACE(); 2170 2171 if (unlikely(sess == NULL)) { 2172 DPAA2_SEC_ERR("Invalid session struct"); 2173 return -1; 2174 } 2175 2176 memset(session, 0, sizeof(dpaa2_sec_session)); 2177 /* Default IV length = 0 */ 2178 session->iv.length = 0; 2179 2180 /* Cipher Only */ 2181 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 2182 session->ctxt_type = DPAA2_SEC_CIPHER; 2183 dpaa2_sec_cipher_init(dev, xform, session); 2184 2185 /* Authentication Only */ 2186 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2187 xform->next == NULL) { 2188 session->ctxt_type = DPAA2_SEC_AUTH; 2189 dpaa2_sec_auth_init(dev, xform, session); 2190 2191 /* Cipher then Authenticate */ 2192 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 2193 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2194 session->ext_params.aead_ctxt.auth_cipher_text = true; 2195 dpaa2_sec_aead_chain_init(dev, xform, session); 2196 2197 /* Authenticate then Cipher */ 2198 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 2199 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2200 session->ext_params.aead_ctxt.auth_cipher_text = false; 2201 dpaa2_sec_aead_chain_init(dev, xform, session); 2202 2203 /* AEAD operation for AES-GCM kind of Algorithms */ 2204 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 2205 xform->next == NULL) { 2206 dpaa2_sec_aead_init(dev, xform, session); 2207 2208 } else { 2209 DPAA2_SEC_ERR("Invalid crypto type"); 2210 return -EINVAL; 2211 } 2212 2213 return 0; 2214 } 2215 2216 static int 2217 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform, 2218 dpaa2_sec_session *session, 2219 struct alginfo *aeaddata) 2220 { 2221 PMD_INIT_FUNC_TRACE(); 2222 2223 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 2224 RTE_CACHE_LINE_SIZE); 2225 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 2226 DPAA2_SEC_ERR("No Memory for aead key"); 2227 return -1; 2228 } 2229 memcpy(session->aead_key.data, aead_xform->key.data, 2230 aead_xform->key.length); 2231 2232 session->digest_length = aead_xform->digest_length; 2233 session->aead_key.length = aead_xform->key.length; 2234 2235 aeaddata->key = (size_t)session->aead_key.data; 2236 aeaddata->keylen = session->aead_key.length; 2237 aeaddata->key_enc_flags = 0; 2238 aeaddata->key_type = RTA_DATA_IMM; 2239 2240 switch (aead_xform->algo) { 2241 case RTE_CRYPTO_AEAD_AES_GCM: 2242 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2243 aeaddata->algmode = OP_ALG_AAI_GCM; 2244 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; 2245 break; 2246 case RTE_CRYPTO_AEAD_AES_CCM: 2247 aeaddata->algtype = OP_ALG_ALGSEL_AES; 2248 aeaddata->algmode = OP_ALG_AAI_CCM; 2249 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM; 2250 break; 2251 default: 2252 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", 2253 aead_xform->algo); 2254 return -1; 2255 } 2256 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 2257 DIR_ENC : DIR_DEC; 2258 2259 return 0; 2260 } 2261 2262 static int 2263 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform, 2264 struct rte_crypto_auth_xform *auth_xform, 2265 dpaa2_sec_session *session, 2266 struct alginfo *cipherdata, 2267 struct alginfo *authdata) 2268 { 2269 if (cipher_xform) { 2270 session->cipher_key.data = rte_zmalloc(NULL, 2271 cipher_xform->key.length, 2272 RTE_CACHE_LINE_SIZE); 2273 if (session->cipher_key.data == NULL && 2274 cipher_xform->key.length > 0) { 2275 DPAA2_SEC_ERR("No Memory for cipher key"); 2276 return -ENOMEM; 2277 } 2278 2279 session->cipher_key.length = cipher_xform->key.length; 2280 memcpy(session->cipher_key.data, cipher_xform->key.data, 2281 cipher_xform->key.length); 2282 session->cipher_alg = cipher_xform->algo; 2283 } else { 2284 session->cipher_key.data = NULL; 2285 session->cipher_key.length = 0; 2286 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 2287 } 2288 2289 if (auth_xform) { 2290 session->auth_key.data = rte_zmalloc(NULL, 2291 auth_xform->key.length, 2292 RTE_CACHE_LINE_SIZE); 2293 if (session->auth_key.data == NULL && 2294 auth_xform->key.length > 0) { 2295 DPAA2_SEC_ERR("No Memory for auth key"); 2296 return -ENOMEM; 2297 } 2298 session->auth_key.length = auth_xform->key.length; 2299 memcpy(session->auth_key.data, auth_xform->key.data, 2300 auth_xform->key.length); 2301 session->auth_alg = auth_xform->algo; 2302 } else { 2303 session->auth_key.data = NULL; 2304 session->auth_key.length = 0; 2305 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 2306 } 2307 2308 authdata->key = (size_t)session->auth_key.data; 2309 authdata->keylen = session->auth_key.length; 2310 authdata->key_enc_flags = 0; 2311 authdata->key_type = RTA_DATA_IMM; 2312 switch (session->auth_alg) { 2313 case RTE_CRYPTO_AUTH_SHA1_HMAC: 2314 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 2315 authdata->algmode = OP_ALG_AAI_HMAC; 2316 break; 2317 case RTE_CRYPTO_AUTH_MD5_HMAC: 2318 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96; 2319 authdata->algmode = OP_ALG_AAI_HMAC; 2320 break; 2321 case RTE_CRYPTO_AUTH_SHA256_HMAC: 2322 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 2323 authdata->algmode = OP_ALG_AAI_HMAC; 2324 break; 2325 case RTE_CRYPTO_AUTH_SHA384_HMAC: 2326 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 2327 authdata->algmode = OP_ALG_AAI_HMAC; 2328 break; 2329 case RTE_CRYPTO_AUTH_SHA512_HMAC: 2330 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 2331 authdata->algmode = OP_ALG_AAI_HMAC; 2332 break; 2333 case RTE_CRYPTO_AUTH_AES_CMAC: 2334 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96; 2335 break; 2336 case RTE_CRYPTO_AUTH_NULL: 2337 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL; 2338 break; 2339 case RTE_CRYPTO_AUTH_SHA224_HMAC: 2340 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 2341 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 2342 case RTE_CRYPTO_AUTH_SHA1: 2343 case RTE_CRYPTO_AUTH_SHA256: 2344 case RTE_CRYPTO_AUTH_SHA512: 2345 case RTE_CRYPTO_AUTH_SHA224: 2346 case RTE_CRYPTO_AUTH_SHA384: 2347 case RTE_CRYPTO_AUTH_MD5: 2348 case RTE_CRYPTO_AUTH_AES_GMAC: 2349 case RTE_CRYPTO_AUTH_KASUMI_F9: 2350 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 2351 case RTE_CRYPTO_AUTH_ZUC_EIA3: 2352 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", 2353 session->auth_alg); 2354 return -1; 2355 default: 2356 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", 2357 session->auth_alg); 2358 return -1; 2359 } 2360 cipherdata->key = (size_t)session->cipher_key.data; 2361 cipherdata->keylen = session->cipher_key.length; 2362 cipherdata->key_enc_flags = 0; 2363 cipherdata->key_type = RTA_DATA_IMM; 2364 2365 switch (session->cipher_alg) { 2366 case RTE_CRYPTO_CIPHER_AES_CBC: 2367 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC; 2368 cipherdata->algmode = OP_ALG_AAI_CBC; 2369 break; 2370 case RTE_CRYPTO_CIPHER_3DES_CBC: 2371 cipherdata->algtype = OP_PCL_IPSEC_3DES; 2372 cipherdata->algmode = OP_ALG_AAI_CBC; 2373 break; 2374 case RTE_CRYPTO_CIPHER_AES_CTR: 2375 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR; 2376 cipherdata->algmode = OP_ALG_AAI_CTR; 2377 break; 2378 case RTE_CRYPTO_CIPHER_NULL: 2379 cipherdata->algtype = OP_PCL_IPSEC_NULL; 2380 break; 2381 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 2382 case RTE_CRYPTO_CIPHER_3DES_ECB: 2383 case RTE_CRYPTO_CIPHER_AES_ECB: 2384 case RTE_CRYPTO_CIPHER_KASUMI_F8: 2385 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", 2386 session->cipher_alg); 2387 return -1; 2388 default: 2389 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", 2390 session->cipher_alg); 2391 return -1; 2392 } 2393 2394 return 0; 2395 } 2396 2397 #ifdef RTE_LIBRTE_SECURITY_TEST 2398 static uint8_t aes_cbc_iv[] = { 2399 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 2400 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; 2401 #endif 2402 2403 static int 2404 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 2405 struct rte_security_session_conf *conf, 2406 void *sess) 2407 { 2408 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 2409 struct rte_crypto_cipher_xform *cipher_xform = NULL; 2410 struct rte_crypto_auth_xform *auth_xform = NULL; 2411 struct rte_crypto_aead_xform *aead_xform = NULL; 2412 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 2413 struct ctxt_priv *priv; 2414 struct ipsec_encap_pdb encap_pdb; 2415 struct ipsec_decap_pdb decap_pdb; 2416 struct alginfo authdata, cipherdata; 2417 int bufsize; 2418 struct sec_flow_context *flc; 2419 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 2420 int ret = -1; 2421 2422 PMD_INIT_FUNC_TRACE(); 2423 2424 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 2425 sizeof(struct ctxt_priv) + 2426 sizeof(struct sec_flc_desc), 2427 RTE_CACHE_LINE_SIZE); 2428 2429 if (priv == NULL) { 2430 DPAA2_SEC_ERR("No memory for priv CTXT"); 2431 return -ENOMEM; 2432 } 2433 2434 priv->fle_pool = dev_priv->fle_pool; 2435 flc = &priv->flc_desc[0].flc; 2436 2437 memset(session, 0, sizeof(dpaa2_sec_session)); 2438 2439 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 2440 cipher_xform = &conf->crypto_xform->cipher; 2441 if (conf->crypto_xform->next) 2442 auth_xform = &conf->crypto_xform->next->auth; 2443 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2444 session, &cipherdata, &authdata); 2445 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 2446 auth_xform = &conf->crypto_xform->auth; 2447 if (conf->crypto_xform->next) 2448 cipher_xform = &conf->crypto_xform->next->cipher; 2449 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform, 2450 session, &cipherdata, &authdata); 2451 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { 2452 aead_xform = &conf->crypto_xform->aead; 2453 ret = dpaa2_sec_ipsec_aead_init(aead_xform, 2454 session, &cipherdata); 2455 } else { 2456 DPAA2_SEC_ERR("XFORM not specified"); 2457 ret = -EINVAL; 2458 goto out; 2459 } 2460 if (ret) { 2461 DPAA2_SEC_ERR("Failed to process xform"); 2462 goto out; 2463 } 2464 2465 session->ctxt_type = DPAA2_SEC_IPSEC; 2466 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 2467 struct ip ip4_hdr; 2468 2469 flc->dhr = SEC_FLC_DHR_OUTBOUND; 2470 ip4_hdr.ip_v = IPVERSION; 2471 ip4_hdr.ip_hl = 5; 2472 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 2473 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 2474 ip4_hdr.ip_id = 0; 2475 ip4_hdr.ip_off = 0; 2476 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 2477 ip4_hdr.ip_p = IPPROTO_ESP; 2478 ip4_hdr.ip_sum = 0; 2479 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 2480 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 2481 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr, 2482 sizeof(struct ip)); 2483 2484 /* For Sec Proto only one descriptor is required. */ 2485 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 2486 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 2487 PDBOPTS_ESP_OIHI_PDB_INL | 2488 PDBOPTS_ESP_IVSRC | 2489 PDBHMO_ESP_ENCAP_DTTL | 2490 PDBHMO_ESP_SNR; 2491 encap_pdb.spi = ipsec_xform->spi; 2492 encap_pdb.ip_hdr_len = sizeof(struct ip); 2493 2494 session->dir = DIR_ENC; 2495 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 2496 1, 0, SHR_SERIAL, &encap_pdb, 2497 (uint8_t *)&ip4_hdr, 2498 &cipherdata, &authdata); 2499 } else if (ipsec_xform->direction == 2500 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 2501 flc->dhr = SEC_FLC_DHR_INBOUND; 2502 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 2503 decap_pdb.options = sizeof(struct ip) << 16; 2504 session->dir = DIR_DEC; 2505 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 2506 1, 0, SHR_SERIAL, 2507 &decap_pdb, &cipherdata, &authdata); 2508 } else 2509 goto out; 2510 2511 if (bufsize < 0) { 2512 DPAA2_SEC_ERR("Crypto: Invalid buffer length"); 2513 goto out; 2514 } 2515 2516 flc->word1_sdl = (uint8_t)bufsize; 2517 2518 /* Enable the stashing control bit */ 2519 DPAA2_SET_FLC_RSC(flc); 2520 flc->word2_rflc_31_0 = lower_32_bits( 2521 (size_t)&(((struct dpaa2_sec_qp *) 2522 dev->data->queue_pairs[0])->rx_vq) | 0x14); 2523 flc->word3_rflc_63_32 = upper_32_bits( 2524 (size_t)&(((struct dpaa2_sec_qp *) 2525 dev->data->queue_pairs[0])->rx_vq)); 2526 2527 /* Set EWS bit i.e. enable write-safe */ 2528 DPAA2_SET_FLC_EWS(flc); 2529 /* Set BS = 1 i.e reuse input buffers as output buffers */ 2530 DPAA2_SET_FLC_REUSE_BS(flc); 2531 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 2532 DPAA2_SET_FLC_REUSE_FF(flc); 2533 2534 session->ctxt = priv; 2535 2536 return 0; 2537 out: 2538 rte_free(session->auth_key.data); 2539 rte_free(session->cipher_key.data); 2540 rte_free(priv); 2541 return ret; 2542 } 2543 2544 static int 2545 dpaa2_sec_security_session_create(void *dev, 2546 struct rte_security_session_conf *conf, 2547 struct rte_security_session *sess, 2548 struct rte_mempool *mempool) 2549 { 2550 void *sess_private_data; 2551 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 2552 int ret; 2553 2554 if (rte_mempool_get(mempool, &sess_private_data)) { 2555 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2556 return -ENOMEM; 2557 } 2558 2559 switch (conf->protocol) { 2560 case RTE_SECURITY_PROTOCOL_IPSEC: 2561 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 2562 sess_private_data); 2563 break; 2564 case RTE_SECURITY_PROTOCOL_MACSEC: 2565 return -ENOTSUP; 2566 default: 2567 return -EINVAL; 2568 } 2569 if (ret != 0) { 2570 DPAA2_SEC_ERR("Failed to configure session parameters"); 2571 /* Return session to mempool */ 2572 rte_mempool_put(mempool, sess_private_data); 2573 return ret; 2574 } 2575 2576 set_sec_session_private_data(sess, sess_private_data); 2577 2578 return ret; 2579 } 2580 2581 /** Clear the memory of session so it doesn't leave key material behind */ 2582 static int 2583 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 2584 struct rte_security_session *sess) 2585 { 2586 PMD_INIT_FUNC_TRACE(); 2587 void *sess_priv = get_sec_session_private_data(sess); 2588 2589 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2590 2591 if (sess_priv) { 2592 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2593 2594 rte_free(s->ctxt); 2595 rte_free(s->cipher_key.data); 2596 rte_free(s->auth_key.data); 2597 memset(sess, 0, sizeof(dpaa2_sec_session)); 2598 set_sec_session_private_data(sess, NULL); 2599 rte_mempool_put(sess_mp, sess_priv); 2600 } 2601 return 0; 2602 } 2603 2604 static int 2605 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, 2606 struct rte_crypto_sym_xform *xform, 2607 struct rte_cryptodev_sym_session *sess, 2608 struct rte_mempool *mempool) 2609 { 2610 void *sess_private_data; 2611 int ret; 2612 2613 if (rte_mempool_get(mempool, &sess_private_data)) { 2614 DPAA2_SEC_ERR("Couldn't get object from session mempool"); 2615 return -ENOMEM; 2616 } 2617 2618 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 2619 if (ret != 0) { 2620 DPAA2_SEC_ERR("Failed to configure session parameters"); 2621 /* Return session to mempool */ 2622 rte_mempool_put(mempool, sess_private_data); 2623 return ret; 2624 } 2625 2626 set_sym_session_private_data(sess, dev->driver_id, 2627 sess_private_data); 2628 2629 return 0; 2630 } 2631 2632 /** Clear the memory of session so it doesn't leave key material behind */ 2633 static void 2634 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, 2635 struct rte_cryptodev_sym_session *sess) 2636 { 2637 PMD_INIT_FUNC_TRACE(); 2638 uint8_t index = dev->driver_id; 2639 void *sess_priv = get_sym_session_private_data(sess, index); 2640 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 2641 2642 if (sess_priv) { 2643 rte_free(s->ctxt); 2644 rte_free(s->cipher_key.data); 2645 rte_free(s->auth_key.data); 2646 memset(sess, 0, sizeof(dpaa2_sec_session)); 2647 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 2648 set_sym_session_private_data(sess, index, NULL); 2649 rte_mempool_put(sess_mp, sess_priv); 2650 } 2651 } 2652 2653 static int 2654 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 2655 struct rte_cryptodev_config *config __rte_unused) 2656 { 2657 PMD_INIT_FUNC_TRACE(); 2658 2659 return 0; 2660 } 2661 2662 static int 2663 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 2664 { 2665 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2666 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2667 struct dpseci_attr attr; 2668 struct dpaa2_queue *dpaa2_q; 2669 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2670 dev->data->queue_pairs; 2671 struct dpseci_rx_queue_attr rx_attr; 2672 struct dpseci_tx_queue_attr tx_attr; 2673 int ret, i; 2674 2675 PMD_INIT_FUNC_TRACE(); 2676 2677 memset(&attr, 0, sizeof(struct dpseci_attr)); 2678 2679 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 2680 if (ret) { 2681 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", 2682 priv->hw_id); 2683 goto get_attr_failure; 2684 } 2685 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 2686 if (ret) { 2687 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); 2688 goto get_attr_failure; 2689 } 2690 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 2691 dpaa2_q = &qp[i]->rx_vq; 2692 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2693 &rx_attr); 2694 dpaa2_q->fqid = rx_attr.fqid; 2695 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); 2696 } 2697 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 2698 dpaa2_q = &qp[i]->tx_vq; 2699 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2700 &tx_attr); 2701 dpaa2_q->fqid = tx_attr.fqid; 2702 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); 2703 } 2704 2705 return 0; 2706 get_attr_failure: 2707 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2708 return -1; 2709 } 2710 2711 static void 2712 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 2713 { 2714 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2715 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2716 int ret; 2717 2718 PMD_INIT_FUNC_TRACE(); 2719 2720 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2721 if (ret) { 2722 DPAA2_SEC_ERR("Failure in disabling dpseci %d device", 2723 priv->hw_id); 2724 return; 2725 } 2726 2727 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 2728 if (ret < 0) { 2729 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); 2730 return; 2731 } 2732 } 2733 2734 static int 2735 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 2736 { 2737 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2738 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2739 int ret; 2740 2741 PMD_INIT_FUNC_TRACE(); 2742 2743 /* Function is reverse of dpaa2_sec_dev_init. 2744 * It does the following: 2745 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 2746 * 2. Close the DPSECI device 2747 * 3. Free the allocated resources. 2748 */ 2749 2750 /*Close the device at underlying layer*/ 2751 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 2752 if (ret) { 2753 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); 2754 return -1; 2755 } 2756 2757 /*Free the allocated memory for ethernet private data and dpseci*/ 2758 priv->hw = NULL; 2759 rte_free(dpseci); 2760 2761 return 0; 2762 } 2763 2764 static void 2765 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 2766 struct rte_cryptodev_info *info) 2767 { 2768 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 2769 2770 PMD_INIT_FUNC_TRACE(); 2771 if (info != NULL) { 2772 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 2773 info->feature_flags = dev->feature_flags; 2774 info->capabilities = dpaa2_sec_capabilities; 2775 /* No limit of number of sessions */ 2776 info->sym.max_nb_sessions = 0; 2777 info->driver_id = cryptodev_driver_id; 2778 } 2779 } 2780 2781 static 2782 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 2783 struct rte_cryptodev_stats *stats) 2784 { 2785 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2786 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2787 struct dpseci_sec_counters counters = {0}; 2788 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2789 dev->data->queue_pairs; 2790 int ret, i; 2791 2792 PMD_INIT_FUNC_TRACE(); 2793 if (stats == NULL) { 2794 DPAA2_SEC_ERR("Invalid stats ptr NULL"); 2795 return; 2796 } 2797 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 2798 if (qp[i] == NULL) { 2799 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 2800 continue; 2801 } 2802 2803 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 2804 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 2805 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 2806 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 2807 } 2808 2809 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 2810 &counters); 2811 if (ret) { 2812 DPAA2_SEC_ERR("SEC counters failed"); 2813 } else { 2814 DPAA2_SEC_INFO("dpseci hardware stats:" 2815 "\n\tNum of Requests Dequeued = %" PRIu64 2816 "\n\tNum of Outbound Encrypt Requests = %" PRIu64 2817 "\n\tNum of Inbound Decrypt Requests = %" PRIu64 2818 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 2819 "\n\tNum of Outbound Bytes Protected = %" PRIu64 2820 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 2821 "\n\tNum of Inbound Bytes Validated = %" PRIu64, 2822 counters.dequeued_requests, 2823 counters.ob_enc_requests, 2824 counters.ib_dec_requests, 2825 counters.ob_enc_bytes, 2826 counters.ob_prot_bytes, 2827 counters.ib_dec_bytes, 2828 counters.ib_valid_bytes); 2829 } 2830 } 2831 2832 static 2833 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 2834 { 2835 int i; 2836 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2837 (dev->data->queue_pairs); 2838 2839 PMD_INIT_FUNC_TRACE(); 2840 2841 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 2842 if (qp[i] == NULL) { 2843 DPAA2_SEC_DEBUG("Uninitialised queue pair"); 2844 continue; 2845 } 2846 qp[i]->tx_vq.rx_pkts = 0; 2847 qp[i]->tx_vq.tx_pkts = 0; 2848 qp[i]->tx_vq.err_pkts = 0; 2849 qp[i]->rx_vq.rx_pkts = 0; 2850 qp[i]->rx_vq.tx_pkts = 0; 2851 qp[i]->rx_vq.err_pkts = 0; 2852 } 2853 } 2854 2855 static struct rte_cryptodev_ops crypto_ops = { 2856 .dev_configure = dpaa2_sec_dev_configure, 2857 .dev_start = dpaa2_sec_dev_start, 2858 .dev_stop = dpaa2_sec_dev_stop, 2859 .dev_close = dpaa2_sec_dev_close, 2860 .dev_infos_get = dpaa2_sec_dev_infos_get, 2861 .stats_get = dpaa2_sec_stats_get, 2862 .stats_reset = dpaa2_sec_stats_reset, 2863 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 2864 .queue_pair_release = dpaa2_sec_queue_pair_release, 2865 .queue_pair_count = dpaa2_sec_queue_pair_count, 2866 .sym_session_get_size = dpaa2_sec_sym_session_get_size, 2867 .sym_session_configure = dpaa2_sec_sym_session_configure, 2868 .sym_session_clear = dpaa2_sec_sym_session_clear, 2869 }; 2870 2871 static const struct rte_security_capability * 2872 dpaa2_sec_capabilities_get(void *device __rte_unused) 2873 { 2874 return dpaa2_sec_security_cap; 2875 } 2876 2877 struct rte_security_ops dpaa2_sec_security_ops = { 2878 .session_create = dpaa2_sec_security_session_create, 2879 .session_update = NULL, 2880 .session_stats_get = NULL, 2881 .session_destroy = dpaa2_sec_security_session_destroy, 2882 .set_pkt_metadata = NULL, 2883 .capabilities_get = dpaa2_sec_capabilities_get 2884 }; 2885 2886 static int 2887 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 2888 { 2889 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 2890 2891 rte_free(dev->security_ctx); 2892 2893 rte_mempool_free(internals->fle_pool); 2894 2895 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", 2896 dev->data->name, rte_socket_id()); 2897 2898 return 0; 2899 } 2900 2901 static int 2902 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 2903 { 2904 struct dpaa2_sec_dev_private *internals; 2905 struct rte_device *dev = cryptodev->device; 2906 struct rte_dpaa2_device *dpaa2_dev; 2907 struct rte_security_ctx *security_instance; 2908 struct fsl_mc_io *dpseci; 2909 uint16_t token; 2910 struct dpseci_attr attr; 2911 int retcode, hw_id; 2912 char str[20]; 2913 2914 PMD_INIT_FUNC_TRACE(); 2915 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 2916 if (dpaa2_dev == NULL) { 2917 DPAA2_SEC_ERR("DPAA2 SEC device not found"); 2918 return -1; 2919 } 2920 hw_id = dpaa2_dev->object_id; 2921 2922 cryptodev->driver_id = cryptodev_driver_id; 2923 cryptodev->dev_ops = &crypto_ops; 2924 2925 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 2926 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 2927 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2928 RTE_CRYPTODEV_FF_HW_ACCELERATED | 2929 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2930 RTE_CRYPTODEV_FF_SECURITY | 2931 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 2932 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 2933 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 2934 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 2935 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 2936 2937 internals = cryptodev->data->dev_private; 2938 2939 /* 2940 * For secondary processes, we don't initialise any further as primary 2941 * has already done this work. Only check we don't need a different 2942 * RX function 2943 */ 2944 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2945 DPAA2_SEC_DEBUG("Device already init by primary process"); 2946 return 0; 2947 } 2948 2949 /* Initialize security_ctx only for primary process*/ 2950 security_instance = rte_malloc("rte_security_instances_ops", 2951 sizeof(struct rte_security_ctx), 0); 2952 if (security_instance == NULL) 2953 return -ENOMEM; 2954 security_instance->device = (void *)cryptodev; 2955 security_instance->ops = &dpaa2_sec_security_ops; 2956 security_instance->sess_cnt = 0; 2957 cryptodev->security_ctx = security_instance; 2958 2959 /*Open the rte device via MC and save the handle for further use*/ 2960 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 2961 sizeof(struct fsl_mc_io), 0); 2962 if (!dpseci) { 2963 DPAA2_SEC_ERR( 2964 "Error in allocating the memory for dpsec object"); 2965 return -1; 2966 } 2967 dpseci->regs = rte_mcp_ptr_list[0]; 2968 2969 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 2970 if (retcode != 0) { 2971 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", 2972 retcode); 2973 goto init_error; 2974 } 2975 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 2976 if (retcode != 0) { 2977 DPAA2_SEC_ERR( 2978 "Cannot get dpsec device attributed: Error = %x", 2979 retcode); 2980 goto init_error; 2981 } 2982 sprintf(cryptodev->data->name, "dpsec-%u", hw_id); 2983 2984 internals->max_nb_queue_pairs = attr.num_tx_queues; 2985 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 2986 internals->hw = dpseci; 2987 internals->token = token; 2988 2989 sprintf(str, "fle_pool_%d", cryptodev->data->dev_id); 2990 internals->fle_pool = rte_mempool_create((const char *)str, 2991 FLE_POOL_NUM_BUFS, 2992 FLE_POOL_BUF_SIZE, 2993 FLE_POOL_CACHE_SIZE, 0, 2994 NULL, NULL, NULL, NULL, 2995 SOCKET_ID_ANY, 0); 2996 if (!internals->fle_pool) { 2997 DPAA2_SEC_ERR("Mempool (%s) creation failed", str); 2998 goto init_error; 2999 } 3000 3001 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); 3002 return 0; 3003 3004 init_error: 3005 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); 3006 3007 /* dpaa2_sec_uninit(crypto_dev_name); */ 3008 return -EFAULT; 3009 } 3010 3011 static int 3012 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused, 3013 struct rte_dpaa2_device *dpaa2_dev) 3014 { 3015 struct rte_cryptodev *cryptodev; 3016 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 3017 3018 int retval; 3019 3020 sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id); 3021 3022 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 3023 if (cryptodev == NULL) 3024 return -ENOMEM; 3025 3026 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3027 cryptodev->data->dev_private = rte_zmalloc_socket( 3028 "cryptodev private structure", 3029 sizeof(struct dpaa2_sec_dev_private), 3030 RTE_CACHE_LINE_SIZE, 3031 rte_socket_id()); 3032 3033 if (cryptodev->data->dev_private == NULL) 3034 rte_panic("Cannot allocate memzone for private " 3035 "device data"); 3036 } 3037 3038 dpaa2_dev->cryptodev = cryptodev; 3039 cryptodev->device = &dpaa2_dev->device; 3040 3041 /* init user callbacks */ 3042 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 3043 3044 /* Invoke PMD device initialization function */ 3045 retval = dpaa2_sec_dev_init(cryptodev); 3046 if (retval == 0) 3047 return 0; 3048 3049 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 3050 rte_free(cryptodev->data->dev_private); 3051 3052 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 3053 3054 return -ENXIO; 3055 } 3056 3057 static int 3058 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 3059 { 3060 struct rte_cryptodev *cryptodev; 3061 int ret; 3062 3063 cryptodev = dpaa2_dev->cryptodev; 3064 if (cryptodev == NULL) 3065 return -ENODEV; 3066 3067 ret = dpaa2_sec_uninit(cryptodev); 3068 if (ret) 3069 return ret; 3070 3071 return rte_cryptodev_pmd_destroy(cryptodev); 3072 } 3073 3074 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 3075 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 3076 .drv_type = DPAA2_CRYPTO, 3077 .driver = { 3078 .name = "DPAA2 SEC PMD" 3079 }, 3080 .probe = cryptodev_dpaa2_sec_probe, 3081 .remove = cryptodev_dpaa2_sec_remove, 3082 }; 3083 3084 static struct cryptodev_driver dpaa2_sec_crypto_drv; 3085 3086 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 3087 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, 3088 rte_dpaa2_sec_driver.driver, cryptodev_driver_id); 3089 3090 RTE_INIT(dpaa2_sec_init_log) 3091 { 3092 /* Bus level logs */ 3093 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); 3094 if (dpaa2_logtype_sec >= 0) 3095 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); 3096 } 3097