/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* The minimum job descriptor consists of a one-word job descriptor HEADER
 * and a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE    (CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID        0x1957
#define FSL_DEVICE_ID        0x410
#define FSL_SUBSYSTEM_SEC    1
#define FSL_MC_DPSECI_DEVID  3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS    32000
#define FLE_POOL_BUF_SIZE    256
#define FLE_POOL_CACHE_SIZE  512
#define SEC_FLC_DHR_OUTBOUND -114
#define SEC_FLC_DHR_INBOUND  0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

static inline int
build_proto_fd(dpaa2_sec_session *sess,
           struct rte_crypto_op *op,
           struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct sec_flow_context *flc;
    struct rte_mbuf *mbuf = sym_op->m_src;

    if (likely(bpid < MAX_BPID))
        DPAA2_SET_FD_BPID(fd, bpid);
    else
        DPAA2_SET_FD_IVP(fd);

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;

    DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
    DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
    DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));

    /* Save the physical address of the mbuf */
    op->sym->aead.digest.phys_addr = mbuf->buf_iova;
    mbuf->buf_iova = (uint64_t)op;

    return 0;
}
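
/*
 * build_proto_fd() (above) covers the protocol-offload (IPSEC) case: it
 * uses a simple single-buffer FD, so there is no spare FLE in which to
 * park a backpointer to the crypto op. Instead, the op pointer is
 * stashed in mbuf->buf_iova and the displaced IOVA is saved in the
 * otherwise-unused aead.digest.phys_addr field; sec_simple_fd_to_mbuf()
 * reverses the swap on dequeue.
 */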

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
             struct rte_crypto_op *op,
             struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
    int icv_len = sess->digest_length, retval;
    uint8_t *old_icv;
    struct rte_mbuf *dst;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            sess->iv.offset);

    PMD_INIT_FUNC_TRACE();

    if (sym_op->m_dst)
        dst = sym_op->m_dst;
    else
        dst = sym_op->m_src;

    /* The first FLE entry stores the mbuf and session ctxt. The FD
     * address points at the second FLE, so on dequeue we step back one
     * FLE to recover the mbuf address. TODO: using the inline mbuf
     * would be a better approach.
     */
    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
    if (retval) {
        RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
        return -1;
    }
    memset(fle, 0, FLE_POOL_BUF_SIZE);
    DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
    DPAA2_FLE_SAVE_CTXT(fle, priv);
    fle = fle + 1;
    sge = fle + 2;
    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);
        DPAA2_SET_FLE_BPID(sge + 2, bpid);
        DPAA2_SET_FLE_BPID(sge + 3, bpid);
    } else {
        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));
        DPAA2_SET_FLE_IVP((sge + 2));
        DPAA2_SET_FLE_IVP((sge + 3));
    }

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;
    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
           "iv-len=%d data_off: 0x%x\n",
           sym_op->aead.data.offset,
           sym_op->aead.data.length,
           sess->digest_length,
           sess->iv.length,
           sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    if (auth_only_len)
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->aead.data.length + icv_len + auth_only_len) :
            sym_op->aead.data.length + auth_only_len;

    DPAA2_SET_FLE_SG_EXT(fle);

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
            dst->data_off - auth_only_len);
    sge->length = sym_op->aead.data.length + auth_only_len;

    if (sess->dir == DIR_ENC) {
        sge++;
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
                sess->iv.length + auth_only_len));
    }
    DPAA2_SET_FLE_FIN(sge);

    sge++;
    fle++;

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(fle);
    DPAA2_SET_FLE_FIN(fle);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->aead.data.length + sess->iv.length +
             auth_only_len) :
            (sym_op->aead.data.length + sess->iv.length +
             auth_only_len + sess->digest_length);

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
    sge->length = sess->iv.length;
    sge++;
    if (auth_only_len) {
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
        sge->length = auth_only_len;
        DPAA2_SET_FLE_BPID(sge, bpid);
        sge++;
    }

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
            sym_op->m_src->data_off);
    sge->length = sym_op->aead.data.length;
    if (sess->dir == DIR_DEC) {
        sge++;
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->aead.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
                sess->digest_length +
                sess->iv.length +
                auth_only_len));
    }
    DPAA2_SET_FLE_FIN(sge);

    if (auth_only_len) {
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
    }

    return 0;
}
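
/*
 * Compound-frame layout used by the FD builders. One buffer from the
 * FLE pool holds everything; the FD points at fle[0], while fle[-1]
 * carries only driver bookkeeping and is never seen by hardware:
 *
 *   fle[-1]   backpointers to the crypto op and ctxt_priv
 *   fle[0]    output frame list entry -> SG table (data [+ digest])
 *   fle[1]    input frame list entry  -> SG table (IV [+ AAD] + data
 *             [+ copy of the ICV when verifying])
 *   sge[0..3] the SG table itself, placed right after the FLEs
 */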

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
         struct rte_crypto_op *op,
         struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sym_op->auth.data.length -
            sym_op->cipher.data.length;
    int icv_len = sess->digest_length, retval;
    uint8_t *old_icv;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            sess->iv.offset);
    struct rte_mbuf *dst;

    PMD_INIT_FUNC_TRACE();

    if (sym_op->m_dst)
        dst = sym_op->m_dst;
    else
        dst = sym_op->m_src;

    /* The first FLE entry stores the mbuf. The FD address points at
     * the second FLE, so on dequeue we step back one FLE to recover
     * the mbuf address. TODO: using the inline mbuf would be a better
     * approach.
     */
    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
    if (retval) {
        RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
        return -1;
    }
    memset(fle, 0, FLE_POOL_BUF_SIZE);
    DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
    DPAA2_FLE_SAVE_CTXT(fle, priv);
    fle = fle + 1;
    sge = fle + 2;
    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);
        DPAA2_SET_FLE_BPID(sge + 2, bpid);
        DPAA2_SET_FLE_BPID(sge + 3, bpid);
    } else {
        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));
        DPAA2_SET_FLE_IVP((sge + 2));
        DPAA2_SET_FLE_IVP((sge + 3));
    }

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;
    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
           "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
           sym_op->auth.data.offset,
           sym_op->auth.data.length,
           sess->digest_length,
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sess->iv.length,
           sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    if (auth_only_len)
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->cipher.data.length + icv_len) :
            sym_op->cipher.data.length;

    DPAA2_SET_FLE_SG_EXT(fle);

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
            dst->data_off);
    sge->length = sym_op->cipher.data.length;

    if (sess->dir == DIR_ENC) {
        sge++;
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
                sess->iv.length));
    }
    DPAA2_SET_FLE_FIN(sge);

    sge++;
    fle++;

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(fle);
    DPAA2_SET_FLE_FIN(fle);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->auth.data.length + sess->iv.length) :
            (sym_op->auth.data.length + sess->iv.length +
             sess->digest_length);

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    sge->length = sess->iv.length;
    sge++;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
            sym_op->m_src->data_off);
    sge->length = sym_op->auth.data.length;
    if (sess->dir == DIR_DEC) {
        sge++;
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->auth.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
                sess->digest_length +
                sess->iv.length));
    }
    DPAA2_SET_FLE_FIN(sge);
    if (auth_only_len) {
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
    }
    return 0;
}
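
/*
 * Auth-only frames are simpler: the output side is just the digest
 * buffer. On generation SEC writes the digest there directly; on
 * verification the caller's digest is first copied aside (old_digest)
 * and appended to the input so that SEC can compare it in-line.
 */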

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
          struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;
    uint8_t *old_digest;
    int retval;

    PMD_INIT_FUNC_TRACE();

    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
    if (retval) {
        RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
        return -1;
    }
    memset(fle, 0, FLE_POOL_BUF_SIZE);
    /* The first FLE entry stores the mbuf. The FD address points at
     * the second FLE, so on dequeue we step back one FLE to recover
     * the mbuf address. TODO: using the inline mbuf would be a better
     * approach.
     */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
    DPAA2_FLE_SAVE_CTXT(fle, priv);
    fle = fle + 1;

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
    } else {
        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
    }
    flc = &priv->flc_desc[DESC_INITFINAL].flc;
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
    fle->length = sess->digest_length;

    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    fle++;

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(fle,
                DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
        DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
                sym_op->m_src->data_off);
        DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
        fle->length = sym_op->auth.data.length;
    } else {
        sge = fle + 2;
        DPAA2_SET_FLE_SG_EXT(fle);
        DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

        if (likely(bpid < MAX_BPID)) {
            DPAA2_SET_FLE_BPID(sge, bpid);
            DPAA2_SET_FLE_BPID(sge + 1, bpid);
        } else {
            DPAA2_SET_FLE_IVP(sge);
            DPAA2_SET_FLE_IVP((sge + 1));
        }
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
        DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
                sym_op->m_src->data_off);

        DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
                sess->digest_length);
        sge->length = sym_op->auth.data.length;
        sge++;
        old_digest = (uint8_t *)(sge + 1);
        rte_memcpy(old_digest, sym_op->auth.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
        sge->length = sess->digest_length;
        fle->length = sym_op->auth.data.length +
                sess->digest_length;
        DPAA2_SET_FLE_FIN(sge);
    }
    DPAA2_SET_FLE_FIN(fle);

    return 0;
}
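
/*
 * Cipher-only frames use a plain (non-SG) output FLE pointing into the
 * destination mbuf, while the input FLE gathers the IV followed by the
 * payload; the FD length therefore includes the IV.
 */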

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *fle, *sge;
    int retval;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            sess->iv.offset);
    struct rte_mbuf *dst;

    PMD_INIT_FUNC_TRACE();

    if (sym_op->m_dst)
        dst = sym_op->m_dst;
    else
        dst = sym_op->m_src;

    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
    if (retval) {
        RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
        return -1;
    }
    memset(fle, 0, FLE_POOL_BUF_SIZE);
    /* The first FLE entry stores the mbuf. The FD address points at
     * the second FLE, so on dequeue we step back one FLE to recover
     * the mbuf address. TODO: using the inline mbuf would be a better
     * approach.
     */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
    DPAA2_FLE_SAVE_CTXT(fle, priv);
    fle = fle + 1;
    sge = fle + 2;

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);
    } else {
        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));
    }

    flc = &priv->flc_desc[0].flc;
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
            sess->iv.length);
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d, ivlen=%d data_off: 0x%x",
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sess->iv.length,
           sym_op->m_src->data_off);

    DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
            dst->data_off);

    fle->length = sym_op->cipher.data.length + sess->iv.length;

    PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
           flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

    fle++;

    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    fle->length = sym_op->cipher.data.length + sess->iv.length;

    DPAA2_SET_FLE_SG_EXT(fle);

    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    sge->length = sess->iv.length;

    sge++;
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
            sym_op->m_src->data_off);

    sge->length = sym_op->cipher.data.length;
    DPAA2_SET_FLE_FIN(sge);
    DPAA2_SET_FLE_FIN(fle);

    PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
           (void *)DPAA2_GET_FD_ADDR(fd),
           DPAA2_GET_FD_BPID(fd),
           rte_dpaa2_bpid_info[bpid].meta_data_size,
           DPAA2_GET_FD_OFFSET(fd),
           DPAA2_GET_FD_LEN(fd));

    return 0;
}
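
/*
 * build_sec_fd() is the single dispatch point used by the enqueue path:
 * it resolves the session from either the cryptodev or the rte_security
 * op and picks the FD builder according to the session's ctxt_type.
 */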

static inline int
build_sec_fd(struct rte_crypto_op *op,
         struct qbman_fd *fd, uint16_t bpid)
{
    int ret = -1;
    dpaa2_sec_session *sess;

    PMD_INIT_FUNC_TRACE();
    /*
     * Segmented buffer is not supported.
     */
    if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        return -ENOTSUP;
    }

    if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
        sess = (dpaa2_sec_session *)get_session_private_data(
                op->sym->session, cryptodev_driver_id);
    else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
        sess = (dpaa2_sec_session *)get_sec_session_private_data(
                op->sym->sec_session);
    else
        return -1;

    switch (sess->ctxt_type) {
    case DPAA2_SEC_CIPHER:
        ret = build_cipher_fd(sess, op, fd, bpid);
        break;
    case DPAA2_SEC_AUTH:
        ret = build_auth_fd(sess, op, fd, bpid);
        break;
    case DPAA2_SEC_AEAD:
        ret = build_authenc_gcm_fd(sess, op, fd, bpid);
        break;
    case DPAA2_SEC_CIPHER_HASH:
        ret = build_authenc_fd(sess, op, fd, bpid);
        break;
    case DPAA2_SEC_IPSEC:
        ret = build_proto_fd(sess, op, fd, bpid);
        break;
    case DPAA2_SEC_HASH_CIPHER:
    default:
        RTE_LOG(ERR, PMD, "error: Unsupported session\n");
    }
    return ret;
}
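
/*
 * The two burst functions below plug into the generic cryptodev API.
 * A minimal caller-side sketch (an illustration only; it assumes a
 * device and queue pair already configured through the standard
 * rte_cryptodev_* setup calls, with hypothetical caller variables
 * dev_id, qp_id, ops and nb_ops):
 *
 *    uint16_t sent, done = 0;
 *
 *    sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *    while (done < sent)
 *        done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                            &ops[done], sent - done);
 */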

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
            uint16_t nb_ops)
{
    /* Function to transmit the frames to the given device and VQ */
    uint32_t loop;
    int32_t ret;
    struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
    uint32_t frames_to_send;
    struct qbman_eq_desc eqdesc;
    struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
    struct qbman_swp *swp;
    uint16_t num_tx = 0;
    /* TODO: support multiple buffer pools */
    uint16_t bpid;
    struct rte_mempool *mb_pool;

    if (unlikely(nb_ops == 0))
        return 0;

    if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
        RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
        return 0;
    }
    /* Prepare the enqueue descriptor */
    qbman_eq_desc_clear(&eqdesc);
    qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
    qbman_eq_desc_set_response(&eqdesc, 0, 0);
    qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

    if (!DPAA2_PER_LCORE_SEC_DPIO) {
        ret = dpaa2_affine_qbman_swp_sec();
        if (ret) {
            RTE_LOG(ERR, PMD, "Failure in affining portal\n");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_SEC_PORTAL;

    while (nb_ops) {
        /* Send a full ring of frames per iteration; the nb_ops >> 3
         * test assumes MAX_TX_RING_SLOTS is 8.
         */
        frames_to_send = (nb_ops >> 3) ?
                MAX_TX_RING_SLOTS : nb_ops;

        for (loop = 0; loop < frames_to_send; loop++) {
            /* Clear the unused FD fields before sending */
            memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
            mb_pool = (*ops)->sym->m_src->pool;
            bpid = mempool_to_bpid(mb_pool);
            ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
            if (ret) {
                PMD_DRV_LOG(ERR, "error: Improper packet"
                        " contents for crypto operation\n");
                goto skip_tx;
            }
            ops++;
        }
        loop = 0;
        while (loop < frames_to_send) {
            loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                            &fd_arr[loop],
                            NULL,
                            frames_to_send - loop);
        }

        num_tx += frames_to_send;
        nb_ops -= frames_to_send;
    }
skip_tx:
    dpaa2_qp->tx_vq.tx_pkts += num_tx;
    dpaa2_qp->tx_vq.err_pkts += nb_ops;
    return num_tx;
}

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
    struct rte_crypto_op *op;
    uint16_t len = DPAA2_GET_FD_LEN(fd);
    uint16_t diff = 0;
    dpaa2_sec_session *sess_priv;

    struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
        DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

    op = (struct rte_crypto_op *)mbuf->buf_iova;
    mbuf->buf_iova = op->sym->aead.digest.phys_addr;
    op->sym->aead.digest.phys_addr = 0L;

    sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
            op->sym->sec_session);
    if (sess_priv->dir == DIR_ENC)
        mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
    else
        mbuf->data_off += SEC_FLC_DHR_INBOUND;
    diff = len - mbuf->pkt_len;
    mbuf->pkt_len += diff;
    mbuf->data_len += diff;

    return op;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
    struct qbman_fle *fle;
    struct rte_crypto_op *op;
    struct ctxt_priv *priv;
    struct rte_mbuf *dst, *src;

    if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
        return sec_simple_fd_to_mbuf(fd, driver_id);

    fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

    PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
           fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

    /* The first FLE entry stores the mbuf. The FD address points at
     * the second FLE, so step back one FLE to recover the op address.
     * TODO: using the inline mbuf would be a better approach.
     */

    if (unlikely(DPAA2_GET_FD_IVP(fd))) {
        /* TODO: non-inline buffers are not handled yet */
        RTE_LOG(ERR, PMD, "error: non-inline buffer not supported\n");
        return NULL;
    }
    op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
            DPAA2_GET_FLE_ADDR((fle - 1)));

    /* Prefetch op */
    src = op->sym->m_src;
    rte_prefetch0(src);

    if (op->sym->m_dst) {
        dst = op->sym->m_dst;
        rte_prefetch0(dst);
    } else
        dst = src;

    PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
           (void *)dst, dst->buf_addr);

    PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
           (void *)DPAA2_GET_FD_ADDR(fd),
           DPAA2_GET_FD_BPID(fd),
           rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
           DPAA2_GET_FD_OFFSET(fd),
           DPAA2_GET_FD_LEN(fd));

    /* Free the FLE memory */
    priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
    rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

    return op;
}
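
/*
 * Dequeue uses QBMAN "volatile" (pull) mode: a VDQ command is issued
 * for up to DPAA2_DQRR_RING_SIZE frames, then each dq_storage entry is
 * polled first for command completion and then for a new-result token
 * before its FD is consumed; the loop terminates once the pull-complete
 * flag is seen.
 */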

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
            uint16_t nb_ops)
{
    /* Receive frames for a given device and VQ */
    struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
    struct rte_cryptodev *dev =
        (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
    struct qbman_result *dq_storage;
    uint32_t fqid = dpaa2_qp->rx_vq.fqid;
    int ret, num_rx = 0;
    uint8_t is_last = 0, status;
    struct qbman_swp *swp;
    const struct qbman_fd *fd;
    struct qbman_pull_desc pulldesc;

    if (!DPAA2_PER_LCORE_SEC_DPIO) {
        ret = dpaa2_affine_qbman_swp_sec();
        if (ret) {
            RTE_LOG(ERR, PMD, "Failure in affining portal\n");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_SEC_PORTAL;
    dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

    qbman_pull_desc_clear(&pulldesc);
    qbman_pull_desc_set_numframes(&pulldesc,
                      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
                      DPAA2_DQRR_RING_SIZE : nb_ops);
    qbman_pull_desc_set_fq(&pulldesc, fqid);
    qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
                    1);

    /* Issue a volatile dequeue command. */
    while (1) {
        if (qbman_swp_pull(swp, &pulldesc)) {
            RTE_LOG(WARNING, PMD,
                "SEC VDQ command is not issued : QBMAN busy\n");
            /* Portal was busy, try again */
            continue;
        }
        break;
    }

    /* Receive packets until the Last Dequeue entry is found for the
     * PULL command issued above.
     */
    while (!is_last) {
        /* Check if the previously issued command has completed.
         * Note that the SWP appears to be shared between the
         * Ethernet driver and the SEC driver.
         */
        while (!qbman_check_command_complete(dq_storage))
            ;

        /* Loop until dq_storage is updated with a new token by QBMAN */
        while (!qbman_check_new_result(dq_storage))
            ;
        /* Check whether the last pull command has expired, setting
         * the loop-termination condition.
         */
        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
            is_last = 1;
            /* Check for a valid frame. */
            status = (uint8_t)qbman_result_DQ_flags(dq_storage);
            if (unlikely(
                (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
                PMD_RX_LOG(DEBUG, "No frame is delivered");
                continue;
            }
        }

        fd = qbman_result_DQ_fd(dq_storage);
        ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

        if (unlikely(fd->simple.frc)) {
            /* TODO: parse SEC errors */
            RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
                fd->simple.frc);
            ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
        } else {
            ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        }

        num_rx++;
        dq_storage++;
    } /* End of packet Rx loop */

    dpaa2_qp->rx_vq.rx_pkts += num_rx;

    PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
    /* Return the total number of packets received to the DPAA2 app */
    return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
    struct dpaa2_sec_qp *qp =
        (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

    PMD_INIT_FUNC_TRACE();

    if (qp->rx_vq.q_storage) {
        dpaa2_free_dq_storage(qp->rx_vq.q_storage);
        rte_free(qp->rx_vq.q_storage);
    }
    rte_free(qp);

    dev->data->queue_pairs[queue_pair_id] = NULL;

    return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
        __rte_unused int socket_id,
        __rte_unused struct rte_mempool *session_pool)
{
    struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
    struct dpaa2_sec_qp *qp;
    struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
    struct dpseci_rx_queue_cfg cfg;
    int32_t retcode;

    PMD_INIT_FUNC_TRACE();

    /* If the qp is already set up, reuse it and return. */
    if (dev->data->queue_pairs[qp_id] != NULL) {
        PMD_DRV_LOG(INFO, "QP already setup");
        return 0;
    }

    PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
            dev, qp_id, qp_conf);

    memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

    qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
            RTE_CACHE_LINE_SIZE);
    if (!qp) {
        RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
        return -1;
    }

    qp->rx_vq.dev = dev;
    qp->tx_vq.dev = dev;
    qp->rx_vq.q_storage = rte_malloc("sec dq storage",
        sizeof(struct queue_storage_info_t),
        RTE_CACHE_LINE_SIZE);
    if (!qp->rx_vq.q_storage) {
        RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
        rte_free(qp);    /* do not leak the qp on failure */
        return -1;
    }
    memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

    if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
        RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
        rte_free(qp->rx_vq.q_storage);
        rte_free(qp);
        return -1;
    }

    dev->data->queue_pairs[qp_id] = qp;

    cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
    cfg.user_ctx = (uint64_t)(&qp->rx_vq);
    retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
                      qp_id, &cfg);
    return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
               __rte_unused uint16_t queue_pair_id)
{
    PMD_INIT_FUNC_TRACE();

    return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
              __rte_unused uint16_t queue_pair_id)
{
    PMD_INIT_FUNC_TRACE();

    return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
    PMD_INIT_FUNC_TRACE();

    return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
    PMD_INIT_FUNC_TRACE();

    return sizeof(dpaa2_sec_session);
}
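
/*
 * The session init helpers below share a pattern: copy the key(s) into
 * session memory, describe them with RTA alginfo, build a SEC shared
 * descriptor with the matching cnstr_shdsc_*() helper directly into the
 * flow context (FLC), and store a pointer to queue pair 0's rx_vq in
 * the FLC's RFLC words so that responses can be steered back to the
 * right queue.
 */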

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
              struct rte_crypto_sym_xform *xform,
              dpaa2_sec_session *session)
{
    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo cipherdata;
    int bufsize, i;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;

    PMD_INIT_FUNC_TRACE();

    /* For SEC CIPHER only one descriptor is required. */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
    if (priv == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
        return -1;
    }

    priv->fle_pool = dev_priv->fle_pool;

    flc = &priv->flc_desc[0].flc;

    session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
        rte_free(priv);
        return -1;
    }
    session->cipher_key.length = xform->cipher.key.length;

    memcpy(session->cipher_key.data, xform->cipher.key.data,
           xform->cipher.key.length);
    cipherdata.key = (uint64_t)session->cipher_key.data;
    cipherdata.keylen = session->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    /* Set IV parameters */
    session->iv.offset = xform->cipher.iv.offset;
    session->iv.length = xform->cipher.iv.length;

    switch (xform->cipher.algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_3DES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CTR;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CTR:
    case RTE_CRYPTO_CIPHER_AES_ECB:
    case RTE_CRYPTO_CIPHER_3DES_ECB:
    case RTE_CRYPTO_CIPHER_AES_XTS:
    case RTE_CRYPTO_CIPHER_AES_F8:
    case RTE_CRYPTO_CIPHER_ARC4:
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
    case RTE_CRYPTO_CIPHER_NULL:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
            xform->cipher.algo);
        goto error_out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
            xform->cipher.algo);
        goto error_out;
    }
    session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
            DIR_ENC : DIR_DEC;

    bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
                    &cipherdata, NULL, session->iv.length,
                    session->dir);
    if (bufsize < 0) {
        RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
        goto error_out;
    }
    flc->dhr = 0;
    flc->bpv0 = 0x1;
    flc->mode_bits = 0x8000;

    flc->word1_sdl = (uint8_t)bufsize;
    flc->word2_rflc_31_0 = lower_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    flc->word3_rflc_63_32 = upper_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    session->ctxt = priv;

    for (i = 0; i < bufsize; i++)
        PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
                i, priv->flc_desc[0].desc[i]);

    return 0;

error_out:
    rte_free(session->cipher_key.data);
    rte_free(priv);
    return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
            struct rte_crypto_sym_xform *xform,
            dpaa2_sec_session *session)
{
    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo authdata;
    unsigned int bufsize, i;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;

    PMD_INIT_FUNC_TRACE();

    /* For SEC AUTH three descriptors are required for various stages */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + 3 *
            sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
    if (priv == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
        return -1;
    }

    priv->fle_pool = dev_priv->fle_pool;
    flc = &priv->flc_desc[DESC_INITFINAL].flc;

    session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for auth key\n");
        rte_free(priv);
        return -1;
    }
    session->auth_key.length = xform->auth.key.length;

    memcpy(session->auth_key.data, xform->auth.key.data,
           xform->auth.key.length);
    authdata.key = (uint64_t)session->auth_key.data;
    authdata.keylen = session->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    session->digest_length = xform->auth.digest_length;

    switch (xform->auth.algo) {
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA1;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
        break;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_MD5;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA256;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA384;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA512;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA224;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
        break;
    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
    case RTE_CRYPTO_AUTH_NULL:
    case RTE_CRYPTO_AUTH_SHA1:
    case RTE_CRYPTO_AUTH_SHA256:
    case RTE_CRYPTO_AUTH_SHA512:
    case RTE_CRYPTO_AUTH_SHA224:
    case RTE_CRYPTO_AUTH_SHA384:
    case RTE_CRYPTO_AUTH_MD5:
    case RTE_CRYPTO_AUTH_AES_GMAC:
    case RTE_CRYPTO_AUTH_KASUMI_F9:
    case RTE_CRYPTO_AUTH_AES_CMAC:
    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
            xform->auth.algo);
        goto error_out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
            xform->auth.algo);
        goto error_out;
    }
    session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
            DIR_ENC : DIR_DEC;

    bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
                   1, 0, &authdata, !session->dir,
                   session->digest_length);

    flc->word1_sdl = (uint8_t)bufsize;
    flc->word2_rflc_31_0 = lower_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    flc->word3_rflc_63_32 = upper_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    session->ctxt = priv;
    for (i = 0; i < bufsize; i++)
        PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
                i, priv->flc_desc[DESC_INITFINAL].desc[i]);

    return 0;

error_out:
    rte_free(session->auth_key.data);
    rte_free(priv);
    return -1;
}

static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
            struct rte_crypto_sym_xform *xform,
            dpaa2_sec_session *session)
{
    struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo aeaddata;
    unsigned int bufsize, i;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;
    struct rte_crypto_aead_xform *aead_xform = &xform->aead;
    int err;

    PMD_INIT_FUNC_TRACE();

    /* Set IV parameters */
    session->iv.offset = aead_xform->iv.offset;
    session->iv.length = aead_xform->iv.length;
    session->ctxt_type = DPAA2_SEC_AEAD;

    /* For SEC AEAD only one descriptor is required */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
    if (priv == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
        return -1;
    }

    priv->fle_pool = dev_priv->fle_pool;
    flc = &priv->flc_desc[0].flc;

    session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
        RTE_LOG(ERR, PMD, "No Memory for aead key\n");
        rte_free(priv);
        return -1;
    }
    memcpy(session->aead_key.data, aead_xform->key.data,
           aead_xform->key.length);

    session->digest_length = aead_xform->digest_length;
    session->aead_key.length = aead_xform->key.length;
    ctxt->auth_only_len = aead_xform->aad_length;

    aeaddata.key = (uint64_t)session->aead_key.data;
    aeaddata.keylen = session->aead_key.length;
    aeaddata.key_enc_flags = 0;
    aeaddata.key_type = RTA_DATA_IMM;

    switch (aead_xform->algo) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        aeaddata.algtype = OP_ALG_ALGSEL_AES;
        aeaddata.algmode = OP_ALG_AAI_GCM;
        session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
        break;
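    /*
     * Note on rta_inline_query() (used here and in the chain init
     * below): it decides whether each key fits inline in the shared
     * descriptor or must be referenced by pointer. The key lengths are
     * parked temporarily in the leading desc[] words, the per-key
     * verdict comes back as a bitmask in the following word, and the
     * scratch words are zeroed again before the real descriptor is
     * built.
     */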
    case RTE_CRYPTO_AEAD_AES_CCM:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
            aead_xform->algo);
        goto error_out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
            aead_xform->algo);
        goto error_out;
    }
    session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
            DIR_ENC : DIR_DEC;

    priv->flc_desc[0].desc[0] = aeaddata.keylen;
    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                   MIN_JOB_DESC_SIZE,
                   (unsigned int *)priv->flc_desc[0].desc,
                   &priv->flc_desc[0].desc[1], 1);

    if (err < 0) {
        PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
        goto error_out;
    }
    if (priv->flc_desc[0].desc[1] & 1) {
        aeaddata.key_type = RTA_DATA_IMM;
    } else {
        aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
        aeaddata.key_type = RTA_DATA_PTR;
    }
    priv->flc_desc[0].desc[0] = 0;
    priv->flc_desc[0].desc[1] = 0;

    if (session->dir == DIR_ENC)
        bufsize = cnstr_shdsc_gcm_encap(
                priv->flc_desc[0].desc, 1, 0,
                &aeaddata, session->iv.length,
                session->digest_length);
    else
        bufsize = cnstr_shdsc_gcm_decap(
                priv->flc_desc[0].desc, 1, 0,
                &aeaddata, session->iv.length,
                session->digest_length);
    flc->word1_sdl = (uint8_t)bufsize;
    flc->word2_rflc_31_0 = lower_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    flc->word3_rflc_63_32 = upper_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    session->ctxt = priv;
    for (i = 0; i < bufsize; i++)
        PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
                i, priv->flc_desc[0].desc[i]);

    return 0;

error_out:
    rte_free(session->aead_key.data);
    rte_free(priv);
    return -1;
}


static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
              struct rte_crypto_sym_xform *xform,
              dpaa2_sec_session *session)
{
    struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo authdata, cipherdata;
    unsigned int bufsize, i;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;
    struct rte_crypto_cipher_xform *cipher_xform;
    struct rte_crypto_auth_xform *auth_xform;
    int err;

    PMD_INIT_FUNC_TRACE();

    if (session->ext_params.aead_ctxt.auth_cipher_text) {
        cipher_xform = &xform->cipher;
        auth_xform = &xform->next->auth;
        session->ctxt_type =
            (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
            DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
    } else {
        cipher_xform = &xform->next->cipher;
        auth_xform = &xform->auth;
        session->ctxt_type =
            (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
            DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
    }

    /* Set IV parameters */
    session->iv.offset = cipher_xform->iv.offset;
    session->iv.length = cipher_xform->iv.length;

    /* For SEC AEAD only one descriptor is required */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
    if (priv == NULL) {
        RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
        return -1;
    }

    priv->fle_pool = dev_priv->fle_pool;
    flc = &priv->flc_desc[0].flc;

    session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
                           RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
        RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
        rte_free(priv);
        return -1;
    }
    session->cipher_key.length = cipher_xform->key.length;
    session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
        RTE_LOG(ERR, PMD, "No Memory for auth key\n");
        rte_free(session->cipher_key.data);
        rte_free(priv);
        return -1;
    }
    session->auth_key.length = auth_xform->key.length;
    memcpy(session->cipher_key.data, cipher_xform->key.data,
           cipher_xform->key.length);
    memcpy(session->auth_key.data, auth_xform->key.data,
           auth_xform->key.length);

    authdata.key = (uint64_t)session->auth_key.data;
    authdata.keylen = session->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    session->digest_length = auth_xform->digest_length;

    switch (auth_xform->algo) {
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA1;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
        break;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_MD5;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA224;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA256;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA384;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA512;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
        break;
    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
    case RTE_CRYPTO_AUTH_NULL:
    case RTE_CRYPTO_AUTH_SHA1:
    case RTE_CRYPTO_AUTH_SHA256:
    case RTE_CRYPTO_AUTH_SHA512:
    case RTE_CRYPTO_AUTH_SHA224:
    case RTE_CRYPTO_AUTH_SHA384:
    case RTE_CRYPTO_AUTH_MD5:
    case RTE_CRYPTO_AUTH_AES_GMAC:
    case RTE_CRYPTO_AUTH_KASUMI_F9:
    case RTE_CRYPTO_AUTH_AES_CMAC:
    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
            auth_xform->algo);
        goto error_out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
            auth_xform->algo);
        goto error_out;
    }
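    /* The remainder of the chain init continues below: cipher alginfo,
     * key inlining via rta_inline_query(), and the combined descriptor.
     */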
    cipherdata.key = (uint64_t)session->cipher_key.data;
    cipherdata.keylen = session->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    switch (cipher_xform->algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_3DES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CTR;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
        break;
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
    case RTE_CRYPTO_CIPHER_NULL:
    case RTE_CRYPTO_CIPHER_3DES_ECB:
    case RTE_CRYPTO_CIPHER_AES_ECB:
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
            cipher_xform->algo);
        goto error_out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
            cipher_xform->algo);
        goto error_out;
    }
    session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
            DIR_ENC : DIR_DEC;

    priv->flc_desc[0].desc[0] = cipherdata.keylen;
    priv->flc_desc[0].desc[1] = authdata.keylen;
    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                   MIN_JOB_DESC_SIZE,
                   (unsigned int *)priv->flc_desc[0].desc,
                   &priv->flc_desc[0].desc[2], 2);

    if (err < 0) {
        PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
        goto error_out;
    }
    if (priv->flc_desc[0].desc[2] & 1) {
        cipherdata.key_type = RTA_DATA_IMM;
    } else {
        cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
        cipherdata.key_type = RTA_DATA_PTR;
    }
    if (priv->flc_desc[0].desc[2] & (1 << 1)) {
        authdata.key_type = RTA_DATA_IMM;
    } else {
        authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
        authdata.key_type = RTA_DATA_PTR;
    }
    priv->flc_desc[0].desc[0] = 0;
    priv->flc_desc[0].desc[1] = 0;
    priv->flc_desc[0].desc[2] = 0;

    if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
        bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
                          0, &cipherdata, &authdata,
                          session->iv.length,
                          ctxt->auth_only_len,
                          session->digest_length,
                          session->dir);
    } else {
        RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
        goto error_out;
    }

    flc->word1_sdl = (uint8_t)bufsize;
    flc->word2_rflc_31_0 = lower_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    flc->word3_rflc_63_32 = upper_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
    session->ctxt = priv;
    for (i = 0; i < bufsize; i++)
        PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
                i, priv->flc_desc[0].desc[i]);

    return 0;

error_out:
    rte_free(session->cipher_key.data);
    rte_free(session->auth_key.data);
    rte_free(priv);
    return -1;
}
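
/*
 * dpaa2_sec_set_session_parameters() maps xform chains onto a session
 * context type:
 *   CIPHER only                -> DPAA2_SEC_CIPHER
 *   AUTH only                  -> DPAA2_SEC_AUTH
 *   CIPHER+AUTH or AUTH+CIPHER -> DPAA2_SEC_CIPHER_HASH / _HASH_CIPHER
 *                                 (chosen in dpaa2_sec_aead_chain_init();
 *                                 hash-then-cipher is rejected at
 *                                 descriptor build time)
 *   single AEAD (AES-GCM)      -> DPAA2_SEC_AEAD
 */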

static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
                 struct rte_crypto_sym_xform *xform, void *sess)
{
    dpaa2_sec_session *session = sess;

    PMD_INIT_FUNC_TRACE();

    if (unlikely(sess == NULL)) {
        RTE_LOG(ERR, PMD, "invalid session struct\n");
        return -1;
    }

    /* Default IV length = 0 */
    session->iv.length = 0;

    /* Cipher Only */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
        session->ctxt_type = DPAA2_SEC_CIPHER;
        dpaa2_sec_cipher_init(dev, xform, session);

    /* Authentication Only */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next == NULL) {
        session->ctxt_type = DPAA2_SEC_AUTH;
        dpaa2_sec_auth_init(dev, xform, session);

    /* Cipher then Authenticate */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        session->ext_params.aead_ctxt.auth_cipher_text = true;
        dpaa2_sec_aead_chain_init(dev, xform, session);

    /* Authenticate then Cipher */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        session->ext_params.aead_ctxt.auth_cipher_text = false;
        dpaa2_sec_aead_chain_init(dev, xform, session);

    /* AEAD operation for AES-GCM kind of Algorithms */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
           xform->next == NULL) {
        dpaa2_sec_aead_init(dev, xform, session);

    } else {
        RTE_LOG(ERR, PMD, "Invalid crypto type\n");
        return -EINVAL;
    }

    return 0;
}

/*
 * Lookaside IPSEC (rte_security) session setup. For egress, a template
 * outer IPv4 header is built and checksummed in software, then embedded
 * in the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL) so that SEC performs the
 * complete tunnel-mode ESP encapsulation; for ingress, the decap PDB
 * only carries the outer header length. Only an IPv4 outer header is
 * handled here.
 */
static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                struct rte_security_session_conf *conf,
                void *sess)
{
    struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
    struct rte_crypto_auth_xform *auth_xform;
    struct rte_crypto_cipher_xform *cipher_xform;
    dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
    struct ctxt_priv *priv;
    struct ipsec_encap_pdb encap_pdb;
    struct ipsec_decap_pdb decap_pdb;
    struct alginfo authdata, cipherdata;
    unsigned int bufsize;
    struct sec_flow_context *flc;

    PMD_INIT_FUNC_TRACE();

    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        cipher_xform = &conf->crypto_xform->cipher;
        auth_xform = &conf->crypto_xform->next->auth;
    } else {
        auth_xform = &conf->crypto_xform->auth;
        cipher_xform = &conf->crypto_xform->next->cipher;
    }
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
                sizeof(struct ctxt_priv) +
                sizeof(struct sec_flc_desc),
                RTE_CACHE_LINE_SIZE);

    if (priv == NULL) {
        RTE_LOG(ERR, PMD, "No memory for priv CTXT\n");
        return -ENOMEM;
    }

    flc = &priv->flc_desc[0].flc;

    session->ctxt_type = DPAA2_SEC_IPSEC;
    session->cipher_key.data = rte_zmalloc(NULL,
                    cipher_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL &&
            cipher_xform->key.length > 0) {
        RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
        rte_free(priv);
        return -ENOMEM;
    }

    session->cipher_key.length = cipher_xform->key.length;
    session->auth_key.data = rte_zmalloc(NULL,
                    auth_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL &&
            auth_xform->key.length > 0) {
        RTE_LOG(ERR, PMD, "No Memory for auth key\n");
        rte_free(session->cipher_key.data);
        rte_free(priv);
        return -ENOMEM;
    }
    session->auth_key.length = auth_xform->key.length;
    memcpy(session->cipher_key.data, cipher_xform->key.data,
           cipher_xform->key.length);
    memcpy(session->auth_key.data, auth_xform->key.data,
           auth_xform->key.length);

    authdata.key = (uint64_t)session->auth_key.data;
    authdata.keylen = session->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;
    switch (auth_xform->algo) {
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
        break;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
        break;
    case RTE_CRYPTO_AUTH_AES_CMAC:
        authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
        session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
        break;
    case RTE_CRYPTO_AUTH_NULL:
        authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
        break;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
    case RTE_CRYPTO_AUTH_SHA1:
    case RTE_CRYPTO_AUTH_SHA256:
    case RTE_CRYPTO_AUTH_SHA512:
    case RTE_CRYPTO_AUTH_SHA224:
    case RTE_CRYPTO_AUTH_SHA384:
    case RTE_CRYPTO_AUTH_MD5:
    case RTE_CRYPTO_AUTH_AES_GMAC:
    case RTE_CRYPTO_AUTH_KASUMI_F9:
    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
            auth_xform->algo);
        goto out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
            auth_xform->algo);
        goto out;
    }
    cipherdata.key = (uint64_t)session->cipher_key.data;
    cipherdata.keylen = session->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    switch (cipher_xform->algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        cipherdata.algtype = OP_PCL_IPSEC_3DES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
        cipherdata.algmode = OP_ALG_AAI_CTR;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
        break;
    case RTE_CRYPTO_CIPHER_NULL:
        cipherdata.algtype = OP_PCL_IPSEC_NULL;
        break;
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
    case RTE_CRYPTO_CIPHER_3DES_ECB:
    case RTE_CRYPTO_CIPHER_AES_ECB:
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
        RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
            cipher_xform->algo);
        goto out;
    default:
        RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
            cipher_xform->algo);
        goto out;
    }

    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        struct ip ip4_hdr;

        flc->dhr = SEC_FLC_DHR_OUTBOUND;
        ip4_hdr.ip_v = IPVERSION;
        ip4_hdr.ip_hl = 5;
        ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
        ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
        ip4_hdr.ip_id = 0;
        ip4_hdr.ip_off = 0;
        ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
        ip4_hdr.ip_p = 0x32;    /* ESP */
        ip4_hdr.ip_sum = 0;
        ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
        ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
        ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
                    sizeof(struct ip));

        /* For Sec Proto only one descriptor is required. */
        memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
        encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
            PDBOPTS_ESP_OIHI_PDB_INL |
            PDBOPTS_ESP_IVSRC |
            PDBHMO_ESP_ENCAP_DTTL;
        encap_pdb.spi = ipsec_xform->spi;
        encap_pdb.ip_hdr_len = sizeof(struct ip);

        session->dir = DIR_ENC;
        bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
                1, 0, &encap_pdb,
                (uint8_t *)&ip4_hdr,
                &cipherdata, &authdata);
    } else if (ipsec_xform->direction ==
            RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
        flc->dhr = SEC_FLC_DHR_INBOUND;
        memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
        decap_pdb.options = sizeof(struct ip) << 16;
        session->dir = DIR_DEC;
        bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
                1, 0, &decap_pdb, &cipherdata, &authdata);
    } else
        goto out;
    flc->word1_sdl = (uint8_t)bufsize;

    /* Enable the stashing control bit */
    DPAA2_SET_FLC_RSC(flc);
    flc->word2_rflc_31_0 = lower_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq) | 0x14);
    flc->word3_rflc_63_32 = upper_32_bits(
            (uint64_t)&(((struct dpaa2_sec_qp *)
            dev->data->queue_pairs[0])->rx_vq));
	/* Enable the stashing control bit */
	DPAA2_SET_FLC_RSC(flc);
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq) | 0x14);
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));

	/* Set EWS bit, i.e. enable write-safe */
	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
	DPAA2_SET_FLC_REUSE_BS(flc);
	/* Set FF = 10; reuse input buffers if they provide sufficient space */
	DPAA2_SET_FLC_REUSE_FF(flc);

	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
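/*
 * Entry point for rte_security session creation. Illustrative usage from
 * an application (a sketch, not part of this driver), assuming a filled
 * struct rte_security_session_conf 'conf' and a session mempool 'mp':
 *
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *s =
 *		rte_security_session_create(ctx, &conf, mp);
 */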
static int
dpaa2_sec_security_session_create(void *dev,
		struct rte_security_session_conf *conf,
		struct rte_security_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA2 PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa2_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA2 PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
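/*
 * Enable the DPSECI object through the MC and cache the Rx/Tx frame queue
 * IDs of every configured queue pair for use in the datapath.
 */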
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}

static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources
	 */

	/* Close the device at the underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the allocated memory for the dpseci object */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}

static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}

static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_start = dpaa2_sec_queue_pair_start,
	.queue_pair_stop = dpaa2_sec_queue_pair_stop,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.session_get_size = dpaa2_sec_session_get_size,
	.session_configure = dpaa2_sec_session_configure,
	.session_clear = dpaa2_sec_session_clear,
};

static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
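/*
 * One-time device initialization: wire up the cryptodev ops and burst
 * functions, create the rte_security context, open the DPSECI object
 * through the MC portal and create the FLE pool used by the datapath.
 */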
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_security_ctx *security_instance;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/* Open the DPSECI device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}
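/*
 * Probe callback invoked by the fslmc bus for each DPSECI object: allocate
 * a cryptodev shell, attach the private data (primary process only) and
 * run the PMD initialization above.
 */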
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the DPAA2 bus and expose the PMD as a crypto driver */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
		cryptodev_driver_id);