1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_cryptodev.h> 13 #include <rte_security_driver.h> 14 #include <rte_malloc.h> 15 #include <rte_memcpy.h> 16 #include <rte_string_fns.h> 17 #include <rte_cycles.h> 18 #include <rte_kvargs.h> 19 #include <rte_dev.h> 20 #include <rte_cryptodev_pmd.h> 21 #include <rte_common.h> 22 #include <rte_fslmc.h> 23 #include <fslmc_vfio.h> 24 #include <dpaa2_hw_pvt.h> 25 #include <dpaa2_hw_dpio.h> 26 #include <dpaa2_hw_mempool.h> 27 #include <fsl_dpseci.h> 28 #include <fsl_mc_sys.h> 29 30 #include "dpaa2_sec_priv.h" 31 #include "dpaa2_sec_logs.h" 32 33 /* RTA header files */ 34 #include <hw/desc/ipsec.h> 35 #include <hw/desc/algo.h> 36 37 /* Minimum job descriptor consists of a oneword job descriptor HEADER and 38 * a pointer to the shared descriptor 39 */ 40 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ) 41 #define FSL_VENDOR_ID 0x1957 42 #define FSL_DEVICE_ID 0x410 43 #define FSL_SUBSYSTEM_SEC 1 44 #define FSL_MC_DPSECI_DEVID 3 45 46 #define NO_PREFETCH 0 47 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */ 48 #define FLE_POOL_NUM_BUFS 32000 49 #define FLE_POOL_BUF_SIZE 256 50 #define FLE_POOL_CACHE_SIZE 512 51 #define SEC_FLC_DHR_OUTBOUND -114 52 #define SEC_FLC_DHR_INBOUND 0 53 54 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; 55 56 static uint8_t cryptodev_driver_id; 57 58 static inline int 59 build_proto_fd(dpaa2_sec_session *sess, 60 struct rte_crypto_op *op, 61 struct qbman_fd *fd, uint16_t bpid) 62 { 63 struct rte_crypto_sym_op *sym_op = op->sym; 64 struct ctxt_priv *priv = sess->ctxt; 65 struct sec_flow_context *flc; 66 struct rte_mbuf *mbuf = sym_op->m_src; 67 68 if (likely(bpid < MAX_BPID)) 69 DPAA2_SET_FD_BPID(fd, bpid); 70 else 71 DPAA2_SET_FD_IVP(fd); 72 73 /* Save the shared descriptor */ 74 flc = &priv->flc_desc[0].flc; 75 76 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 77 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); 78 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); 79 DPAA2_SET_FD_FLC(fd, ((uint64_t)flc)); 80 81 /* save physical address of mbuf */ 82 op->sym->aead.digest.phys_addr = mbuf->buf_iova; 83 mbuf->buf_iova = (uint64_t)op; 84 85 return 0; 86 } 87 88 static inline int 89 build_authenc_gcm_fd(dpaa2_sec_session *sess, 90 struct rte_crypto_op *op, 91 struct qbman_fd *fd, uint16_t bpid) 92 { 93 struct rte_crypto_sym_op *sym_op = op->sym; 94 struct ctxt_priv *priv = sess->ctxt; 95 struct qbman_fle *fle, *sge; 96 struct sec_flow_context *flc; 97 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len; 98 int icv_len = sess->digest_length, retval; 99 uint8_t *old_icv; 100 struct rte_mbuf *dst; 101 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 102 sess->iv.offset); 103 104 PMD_INIT_FUNC_TRACE(); 105 106 if (sym_op->m_dst) 107 dst = sym_op->m_dst; 108 else 109 dst = sym_op->m_src; 110 111 /* TODO we are using the first FLE entry to store Mbuf and session ctxt. 112 * Currently we donot know which FLE has the mbuf stored. 113 * So while retreiving we can go back 1 FLE from the FD -ADDR 114 * to get the MBUF Addr from the previous FLE. 115 * We can have a better approach to use the inline Mbuf 116 */ 117 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 118 if (retval) { 119 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); 120 return -1; 121 } 122 memset(fle, 0, FLE_POOL_BUF_SIZE); 123 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); 124 DPAA2_FLE_SAVE_CTXT(fle, priv); 125 fle = fle + 1; 126 sge = fle + 2; 127 if (likely(bpid < MAX_BPID)) { 128 DPAA2_SET_FD_BPID(fd, bpid); 129 DPAA2_SET_FLE_BPID(fle, bpid); 130 DPAA2_SET_FLE_BPID(fle + 1, bpid); 131 DPAA2_SET_FLE_BPID(sge, bpid); 132 DPAA2_SET_FLE_BPID(sge + 1, bpid); 133 DPAA2_SET_FLE_BPID(sge + 2, bpid); 134 DPAA2_SET_FLE_BPID(sge + 3, bpid); 135 } else { 136 DPAA2_SET_FD_IVP(fd); 137 DPAA2_SET_FLE_IVP(fle); 138 DPAA2_SET_FLE_IVP((fle + 1)); 139 DPAA2_SET_FLE_IVP(sge); 140 DPAA2_SET_FLE_IVP((sge + 1)); 141 DPAA2_SET_FLE_IVP((sge + 2)); 142 DPAA2_SET_FLE_IVP((sge + 3)); 143 } 144 145 /* Save the shared descriptor */ 146 flc = &priv->flc_desc[0].flc; 147 /* Configure FD as a FRAME LIST */ 148 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 149 DPAA2_SET_FD_COMPOUND_FMT(fd); 150 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 151 152 PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n" 153 "iv-len=%d data_off: 0x%x\n", 154 sym_op->aead.data.offset, 155 sym_op->aead.data.length, 156 sym_op->aead.digest.length, 157 sess->iv.length, 158 sym_op->m_src->data_off); 159 160 /* Configure Output FLE with Scatter/Gather Entry */ 161 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 162 if (auth_only_len) 163 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 164 fle->length = (sess->dir == DIR_ENC) ? 165 (sym_op->aead.data.length + icv_len + auth_only_len) : 166 sym_op->aead.data.length + auth_only_len; 167 168 DPAA2_SET_FLE_SG_EXT(fle); 169 170 /* Configure Output SGE for Encap/Decap */ 171 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 172 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 173 dst->data_off - auth_only_len); 174 sge->length = sym_op->aead.data.length + auth_only_len; 175 176 if (sess->dir == DIR_ENC) { 177 sge++; 178 DPAA2_SET_FLE_ADDR(sge, 179 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data)); 180 sge->length = sess->digest_length; 181 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 182 sess->iv.length + auth_only_len)); 183 } 184 DPAA2_SET_FLE_FIN(sge); 185 186 sge++; 187 fle++; 188 189 /* Configure Input FLE with Scatter/Gather Entry */ 190 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 191 DPAA2_SET_FLE_SG_EXT(fle); 192 DPAA2_SET_FLE_FIN(fle); 193 fle->length = (sess->dir == DIR_ENC) ? 194 (sym_op->aead.data.length + sess->iv.length + auth_only_len) : 195 (sym_op->aead.data.length + sess->iv.length + auth_only_len + 196 sess->digest_length); 197 198 /* Configure Input SGE for Encap/Decap */ 199 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr)); 200 sge->length = sess->iv.length; 201 sge++; 202 if (auth_only_len) { 203 DPAA2_SET_FLE_ADDR(sge, 204 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data)); 205 sge->length = auth_only_len; 206 DPAA2_SET_FLE_BPID(sge, bpid); 207 sge++; 208 } 209 210 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 211 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset + 212 sym_op->m_src->data_off); 213 sge->length = sym_op->aead.data.length; 214 if (sess->dir == DIR_DEC) { 215 sge++; 216 old_icv = (uint8_t *)(sge + 1); 217 memcpy(old_icv, sym_op->aead.digest.data, 218 sess->digest_length); 219 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 220 sge->length = sess->digest_length; 221 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length + 222 sess->digest_length + 223 sess->iv.length + 224 auth_only_len)); 225 } 226 DPAA2_SET_FLE_FIN(sge); 227 228 if (auth_only_len) { 229 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 230 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 231 } 232 233 return 0; 234 } 235 236 static inline int 237 build_authenc_fd(dpaa2_sec_session *sess, 238 struct rte_crypto_op *op, 239 struct qbman_fd *fd, uint16_t bpid) 240 { 241 struct rte_crypto_sym_op *sym_op = op->sym; 242 struct ctxt_priv *priv = sess->ctxt; 243 struct qbman_fle *fle, *sge; 244 struct sec_flow_context *flc; 245 uint32_t auth_only_len = sym_op->auth.data.length - 246 sym_op->cipher.data.length; 247 int icv_len = sess->digest_length, retval; 248 uint8_t *old_icv; 249 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 250 sess->iv.offset); 251 struct rte_mbuf *dst; 252 253 PMD_INIT_FUNC_TRACE(); 254 255 if (sym_op->m_dst) 256 dst = sym_op->m_dst; 257 else 258 dst = sym_op->m_src; 259 260 /* we are using the first FLE entry to store Mbuf. 261 * Currently we donot know which FLE has the mbuf stored. 262 * So while retreiving we can go back 1 FLE from the FD -ADDR 263 * to get the MBUF Addr from the previous FLE. 264 * We can have a better approach to use the inline Mbuf 265 */ 266 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 267 if (retval) { 268 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); 269 return -1; 270 } 271 memset(fle, 0, FLE_POOL_BUF_SIZE); 272 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); 273 DPAA2_FLE_SAVE_CTXT(fle, priv); 274 fle = fle + 1; 275 sge = fle + 2; 276 if (likely(bpid < MAX_BPID)) { 277 DPAA2_SET_FD_BPID(fd, bpid); 278 DPAA2_SET_FLE_BPID(fle, bpid); 279 DPAA2_SET_FLE_BPID(fle + 1, bpid); 280 DPAA2_SET_FLE_BPID(sge, bpid); 281 DPAA2_SET_FLE_BPID(sge + 1, bpid); 282 DPAA2_SET_FLE_BPID(sge + 2, bpid); 283 DPAA2_SET_FLE_BPID(sge + 3, bpid); 284 } else { 285 DPAA2_SET_FD_IVP(fd); 286 DPAA2_SET_FLE_IVP(fle); 287 DPAA2_SET_FLE_IVP((fle + 1)); 288 DPAA2_SET_FLE_IVP(sge); 289 DPAA2_SET_FLE_IVP((sge + 1)); 290 DPAA2_SET_FLE_IVP((sge + 2)); 291 DPAA2_SET_FLE_IVP((sge + 3)); 292 } 293 294 /* Save the shared descriptor */ 295 flc = &priv->flc_desc[0].flc; 296 /* Configure FD as a FRAME LIST */ 297 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 298 DPAA2_SET_FD_COMPOUND_FMT(fd); 299 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 300 301 PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n" 302 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", 303 sym_op->auth.data.offset, 304 sym_op->auth.data.length, 305 sess->digest_length, 306 sym_op->cipher.data.offset, 307 sym_op->cipher.data.length, 308 sess->iv.length, 309 sym_op->m_src->data_off); 310 311 /* Configure Output FLE with Scatter/Gather Entry */ 312 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 313 if (auth_only_len) 314 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 315 fle->length = (sess->dir == DIR_ENC) ? 316 (sym_op->cipher.data.length + icv_len) : 317 sym_op->cipher.data.length; 318 319 DPAA2_SET_FLE_SG_EXT(fle); 320 321 /* Configure Output SGE for Encap/Decap */ 322 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 323 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 324 dst->data_off); 325 sge->length = sym_op->cipher.data.length; 326 327 if (sess->dir == DIR_ENC) { 328 sge++; 329 DPAA2_SET_FLE_ADDR(sge, 330 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 331 sge->length = sess->digest_length; 332 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 333 sess->iv.length)); 334 } 335 DPAA2_SET_FLE_FIN(sge); 336 337 sge++; 338 fle++; 339 340 /* Configure Input FLE with Scatter/Gather Entry */ 341 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 342 DPAA2_SET_FLE_SG_EXT(fle); 343 DPAA2_SET_FLE_FIN(fle); 344 fle->length = (sess->dir == DIR_ENC) ? 345 (sym_op->auth.data.length + sess->iv.length) : 346 (sym_op->auth.data.length + sess->iv.length + 347 sess->digest_length); 348 349 /* Configure Input SGE for Encap/Decap */ 350 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 351 sge->length = sess->iv.length; 352 sge++; 353 354 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 355 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 356 sym_op->m_src->data_off); 357 sge->length = sym_op->auth.data.length; 358 if (sess->dir == DIR_DEC) { 359 sge++; 360 old_icv = (uint8_t *)(sge + 1); 361 memcpy(old_icv, sym_op->auth.digest.data, 362 sess->digest_length); 363 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv)); 364 sge->length = sess->digest_length; 365 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length + 366 sess->digest_length + 367 sess->iv.length)); 368 } 369 DPAA2_SET_FLE_FIN(sge); 370 if (auth_only_len) { 371 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len); 372 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len); 373 } 374 return 0; 375 } 376 377 static inline int 378 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 379 struct qbman_fd *fd, uint16_t bpid) 380 { 381 struct rte_crypto_sym_op *sym_op = op->sym; 382 struct qbman_fle *fle, *sge; 383 struct sec_flow_context *flc; 384 struct ctxt_priv *priv = sess->ctxt; 385 uint8_t *old_digest; 386 int retval; 387 388 PMD_INIT_FUNC_TRACE(); 389 390 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 391 if (retval) { 392 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); 393 return -1; 394 } 395 memset(fle, 0, FLE_POOL_BUF_SIZE); 396 /* TODO we are using the first FLE entry to store Mbuf. 397 * Currently we donot know which FLE has the mbuf stored. 398 * So while retreiving we can go back 1 FLE from the FD -ADDR 399 * to get the MBUF Addr from the previous FLE. 400 * We can have a better approach to use the inline Mbuf 401 */ 402 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); 403 DPAA2_FLE_SAVE_CTXT(fle, priv); 404 fle = fle + 1; 405 406 if (likely(bpid < MAX_BPID)) { 407 DPAA2_SET_FD_BPID(fd, bpid); 408 DPAA2_SET_FLE_BPID(fle, bpid); 409 DPAA2_SET_FLE_BPID(fle + 1, bpid); 410 } else { 411 DPAA2_SET_FD_IVP(fd); 412 DPAA2_SET_FLE_IVP(fle); 413 DPAA2_SET_FLE_IVP((fle + 1)); 414 } 415 flc = &priv->flc_desc[DESC_INITFINAL].flc; 416 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 417 418 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data)); 419 fle->length = sess->digest_length; 420 421 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 422 DPAA2_SET_FD_COMPOUND_FMT(fd); 423 fle++; 424 425 if (sess->dir == DIR_ENC) { 426 DPAA2_SET_FLE_ADDR(fle, 427 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 428 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset + 429 sym_op->m_src->data_off); 430 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length); 431 fle->length = sym_op->auth.data.length; 432 } else { 433 sge = fle + 2; 434 DPAA2_SET_FLE_SG_EXT(fle); 435 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 436 437 if (likely(bpid < MAX_BPID)) { 438 DPAA2_SET_FLE_BPID(sge, bpid); 439 DPAA2_SET_FLE_BPID(sge + 1, bpid); 440 } else { 441 DPAA2_SET_FLE_IVP(sge); 442 DPAA2_SET_FLE_IVP((sge + 1)); 443 } 444 DPAA2_SET_FLE_ADDR(sge, 445 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 446 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + 447 sym_op->m_src->data_off); 448 449 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length + 450 sess->digest_length); 451 sge->length = sym_op->auth.data.length; 452 sge++; 453 old_digest = (uint8_t *)(sge + 1); 454 rte_memcpy(old_digest, sym_op->auth.digest.data, 455 sess->digest_length); 456 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest)); 457 sge->length = sess->digest_length; 458 fle->length = sym_op->auth.data.length + 459 sess->digest_length; 460 DPAA2_SET_FLE_FIN(sge); 461 } 462 DPAA2_SET_FLE_FIN(fle); 463 464 return 0; 465 } 466 467 static int 468 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, 469 struct qbman_fd *fd, uint16_t bpid) 470 { 471 struct rte_crypto_sym_op *sym_op = op->sym; 472 struct qbman_fle *fle, *sge; 473 int retval; 474 struct sec_flow_context *flc; 475 struct ctxt_priv *priv = sess->ctxt; 476 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 477 sess->iv.offset); 478 struct rte_mbuf *dst; 479 480 PMD_INIT_FUNC_TRACE(); 481 482 if (sym_op->m_dst) 483 dst = sym_op->m_dst; 484 else 485 dst = sym_op->m_src; 486 487 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); 488 if (retval) { 489 RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); 490 return -1; 491 } 492 memset(fle, 0, FLE_POOL_BUF_SIZE); 493 /* TODO we are using the first FLE entry to store Mbuf. 494 * Currently we donot know which FLE has the mbuf stored. 495 * So while retreiving we can go back 1 FLE from the FD -ADDR 496 * to get the MBUF Addr from the previous FLE. 497 * We can have a better approach to use the inline Mbuf 498 */ 499 DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); 500 DPAA2_FLE_SAVE_CTXT(fle, priv); 501 fle = fle + 1; 502 sge = fle + 2; 503 504 if (likely(bpid < MAX_BPID)) { 505 DPAA2_SET_FD_BPID(fd, bpid); 506 DPAA2_SET_FLE_BPID(fle, bpid); 507 DPAA2_SET_FLE_BPID(fle + 1, bpid); 508 DPAA2_SET_FLE_BPID(sge, bpid); 509 DPAA2_SET_FLE_BPID(sge + 1, bpid); 510 } else { 511 DPAA2_SET_FD_IVP(fd); 512 DPAA2_SET_FLE_IVP(fle); 513 DPAA2_SET_FLE_IVP((fle + 1)); 514 DPAA2_SET_FLE_IVP(sge); 515 DPAA2_SET_FLE_IVP((sge + 1)); 516 } 517 518 flc = &priv->flc_desc[0].flc; 519 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle)); 520 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length + 521 sess->iv.length); 522 DPAA2_SET_FD_COMPOUND_FMT(fd); 523 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); 524 525 PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x", 526 sym_op->cipher.data.offset, 527 sym_op->cipher.data.length, 528 sess->iv.length, 529 sym_op->m_src->data_off); 530 531 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); 532 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + 533 dst->data_off); 534 535 fle->length = sym_op->cipher.data.length + sess->iv.length; 536 537 PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", 538 flc, fle, fle->addr_hi, fle->addr_lo, fle->length); 539 540 fle++; 541 542 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); 543 fle->length = sym_op->cipher.data.length + sess->iv.length; 544 545 DPAA2_SET_FLE_SG_EXT(fle); 546 547 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr)); 548 sge->length = sess->iv.length; 549 550 sge++; 551 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); 552 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + 553 sym_op->m_src->data_off); 554 555 sge->length = sym_op->cipher.data.length; 556 DPAA2_SET_FLE_FIN(sge); 557 DPAA2_SET_FLE_FIN(fle); 558 559 PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d", 560 (void *)DPAA2_GET_FD_ADDR(fd), 561 DPAA2_GET_FD_BPID(fd), 562 rte_dpaa2_bpid_info[bpid].meta_data_size, 563 DPAA2_GET_FD_OFFSET(fd), 564 DPAA2_GET_FD_LEN(fd)); 565 566 return 0; 567 } 568 569 static inline int 570 build_sec_fd(struct rte_crypto_op *op, 571 struct qbman_fd *fd, uint16_t bpid) 572 { 573 int ret = -1; 574 dpaa2_sec_session *sess; 575 576 PMD_INIT_FUNC_TRACE(); 577 /* 578 * Segmented buffer is not supported. 579 */ 580 if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) { 581 op->status = RTE_CRYPTO_OP_STATUS_ERROR; 582 return -ENOTSUP; 583 } 584 585 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) 586 sess = (dpaa2_sec_session *)get_session_private_data( 587 op->sym->session, cryptodev_driver_id); 588 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) 589 sess = (dpaa2_sec_session *)get_sec_session_private_data( 590 op->sym->sec_session); 591 else 592 return -1; 593 594 switch (sess->ctxt_type) { 595 case DPAA2_SEC_CIPHER: 596 ret = build_cipher_fd(sess, op, fd, bpid); 597 break; 598 case DPAA2_SEC_AUTH: 599 ret = build_auth_fd(sess, op, fd, bpid); 600 break; 601 case DPAA2_SEC_AEAD: 602 ret = build_authenc_gcm_fd(sess, op, fd, bpid); 603 break; 604 case DPAA2_SEC_CIPHER_HASH: 605 ret = build_authenc_fd(sess, op, fd, bpid); 606 break; 607 case DPAA2_SEC_IPSEC: 608 ret = build_proto_fd(sess, op, fd, bpid); 609 break; 610 case DPAA2_SEC_HASH_CIPHER: 611 default: 612 RTE_LOG(ERR, PMD, "error: Unsupported session\n"); 613 } 614 return ret; 615 } 616 617 static uint16_t 618 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, 619 uint16_t nb_ops) 620 { 621 /* Function to transmit the frames to given device and VQ*/ 622 uint32_t loop; 623 int32_t ret; 624 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 625 uint32_t frames_to_send; 626 struct qbman_eq_desc eqdesc; 627 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 628 struct qbman_swp *swp; 629 uint16_t num_tx = 0; 630 /*todo - need to support multiple buffer pools */ 631 uint16_t bpid; 632 struct rte_mempool *mb_pool; 633 634 if (unlikely(nb_ops == 0)) 635 return 0; 636 637 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { 638 RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n"); 639 return 0; 640 } 641 /*Prepare enqueue descriptor*/ 642 qbman_eq_desc_clear(&eqdesc); 643 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 644 qbman_eq_desc_set_response(&eqdesc, 0, 0); 645 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); 646 647 if (!DPAA2_PER_LCORE_SEC_DPIO) { 648 ret = dpaa2_affine_qbman_swp_sec(); 649 if (ret) { 650 RTE_LOG(ERR, PMD, "Failure in affining portal\n"); 651 return 0; 652 } 653 } 654 swp = DPAA2_PER_LCORE_SEC_PORTAL; 655 656 while (nb_ops) { 657 frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops; 658 659 for (loop = 0; loop < frames_to_send; loop++) { 660 /*Clear the unused FD fields before sending*/ 661 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd)); 662 mb_pool = (*ops)->sym->m_src->pool; 663 bpid = mempool_to_bpid(mb_pool); 664 ret = build_sec_fd(*ops, &fd_arr[loop], bpid); 665 if (ret) { 666 PMD_DRV_LOG(ERR, "error: Improper packet" 667 " contents for crypto operation\n"); 668 goto skip_tx; 669 } 670 ops++; 671 } 672 loop = 0; 673 while (loop < frames_to_send) { 674 loop += qbman_swp_enqueue_multiple(swp, &eqdesc, 675 &fd_arr[loop], 676 frames_to_send - loop); 677 } 678 679 num_tx += frames_to_send; 680 nb_ops -= frames_to_send; 681 } 682 skip_tx: 683 dpaa2_qp->tx_vq.tx_pkts += num_tx; 684 dpaa2_qp->tx_vq.err_pkts += nb_ops; 685 return num_tx; 686 } 687 688 static inline struct rte_crypto_op * 689 sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id) 690 { 691 struct rte_crypto_op *op; 692 uint16_t len = DPAA2_GET_FD_LEN(fd); 693 uint16_t diff = 0; 694 dpaa2_sec_session *sess_priv; 695 696 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( 697 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), 698 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 699 700 op = (struct rte_crypto_op *)mbuf->buf_iova; 701 mbuf->buf_iova = op->sym->aead.digest.phys_addr; 702 op->sym->aead.digest.phys_addr = 0L; 703 704 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data( 705 op->sym->sec_session); 706 if (sess_priv->dir == DIR_ENC) 707 mbuf->data_off += SEC_FLC_DHR_OUTBOUND; 708 else 709 mbuf->data_off += SEC_FLC_DHR_INBOUND; 710 diff = len - mbuf->pkt_len; 711 mbuf->pkt_len += diff; 712 mbuf->data_len += diff; 713 714 return op; 715 } 716 717 static inline struct rte_crypto_op * 718 sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) 719 { 720 struct qbman_fle *fle; 721 struct rte_crypto_op *op; 722 struct ctxt_priv *priv; 723 struct rte_mbuf *dst, *src; 724 725 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) 726 return sec_simple_fd_to_mbuf(fd, driver_id); 727 728 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 729 730 PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x", 731 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); 732 733 /* we are using the first FLE entry to store Mbuf. 734 * Currently we donot know which FLE has the mbuf stored. 735 * So while retreiving we can go back 1 FLE from the FD -ADDR 736 * to get the MBUF Addr from the previous FLE. 737 * We can have a better approach to use the inline Mbuf 738 */ 739 740 if (unlikely(DPAA2_GET_FD_IVP(fd))) { 741 /* TODO complete it. */ 742 RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n"); 743 return NULL; 744 } 745 op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR( 746 DPAA2_GET_FLE_ADDR((fle - 1))); 747 748 /* Prefeth op */ 749 src = op->sym->m_src; 750 rte_prefetch0(src); 751 752 if (op->sym->m_dst) { 753 dst = op->sym->m_dst; 754 rte_prefetch0(dst); 755 } else 756 dst = src; 757 758 PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p", 759 (void *)dst, dst->buf_addr); 760 761 PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d", 762 (void *)DPAA2_GET_FD_ADDR(fd), 763 DPAA2_GET_FD_BPID(fd), 764 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, 765 DPAA2_GET_FD_OFFSET(fd), 766 DPAA2_GET_FD_LEN(fd)); 767 768 /* free the fle memory */ 769 priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1); 770 rte_mempool_put(priv->fle_pool, (void *)(fle - 1)); 771 772 return op; 773 } 774 775 static uint16_t 776 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, 777 uint16_t nb_ops) 778 { 779 /* Function is responsible to receive frames for a given device and VQ*/ 780 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp; 781 struct rte_cryptodev *dev = 782 (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev); 783 struct qbman_result *dq_storage; 784 uint32_t fqid = dpaa2_qp->rx_vq.fqid; 785 int ret, num_rx = 0; 786 uint8_t is_last = 0, status; 787 struct qbman_swp *swp; 788 const struct qbman_fd *fd; 789 struct qbman_pull_desc pulldesc; 790 791 if (!DPAA2_PER_LCORE_SEC_DPIO) { 792 ret = dpaa2_affine_qbman_swp_sec(); 793 if (ret) { 794 RTE_LOG(ERR, PMD, "Failure in affining portal\n"); 795 return 0; 796 } 797 } 798 swp = DPAA2_PER_LCORE_SEC_PORTAL; 799 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; 800 801 qbman_pull_desc_clear(&pulldesc); 802 qbman_pull_desc_set_numframes(&pulldesc, 803 (nb_ops > DPAA2_DQRR_RING_SIZE) ? 804 DPAA2_DQRR_RING_SIZE : nb_ops); 805 qbman_pull_desc_set_fq(&pulldesc, fqid); 806 qbman_pull_desc_set_storage(&pulldesc, dq_storage, 807 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage), 808 1); 809 810 /*Issue a volatile dequeue command. */ 811 while (1) { 812 if (qbman_swp_pull(swp, &pulldesc)) { 813 RTE_LOG(WARNING, PMD, 814 "SEC VDQ command is not issued : QBMAN busy\n"); 815 /* Portal was busy, try again */ 816 continue; 817 } 818 break; 819 }; 820 821 /* Receive the packets till Last Dequeue entry is found with 822 * respect to the above issues PULL command. 823 */ 824 while (!is_last) { 825 /* Check if the previous issued command is completed. 826 * Also seems like the SWP is shared between the Ethernet Driver 827 * and the SEC driver. 828 */ 829 while (!qbman_check_command_complete(dq_storage)) 830 ; 831 832 /* Loop until the dq_storage is updated with 833 * new token by QBMAN 834 */ 835 while (!qbman_check_new_result(dq_storage)) 836 ; 837 /* Check whether Last Pull command is Expired and 838 * setting Condition for Loop termination 839 */ 840 if (qbman_result_DQ_is_pull_complete(dq_storage)) { 841 is_last = 1; 842 /* Check for valid frame. */ 843 status = (uint8_t)qbman_result_DQ_flags(dq_storage); 844 if (unlikely( 845 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { 846 PMD_RX_LOG(DEBUG, "No frame is delivered"); 847 continue; 848 } 849 } 850 851 fd = qbman_result_DQ_fd(dq_storage); 852 ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id); 853 854 if (unlikely(fd->simple.frc)) { 855 /* TODO Parse SEC errors */ 856 RTE_LOG(ERR, PMD, "SEC returned Error - %x\n", 857 fd->simple.frc); 858 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; 859 } else { 860 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 861 } 862 863 num_rx++; 864 dq_storage++; 865 } /* End of Packet Rx loop */ 866 867 dpaa2_qp->rx_vq.rx_pkts += num_rx; 868 869 PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx); 870 /*Return the total number of packets received to DPAA2 app*/ 871 return num_rx; 872 } 873 874 /** Release queue pair */ 875 static int 876 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) 877 { 878 struct dpaa2_sec_qp *qp = 879 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id]; 880 881 PMD_INIT_FUNC_TRACE(); 882 883 if (qp->rx_vq.q_storage) { 884 dpaa2_free_dq_storage(qp->rx_vq.q_storage); 885 rte_free(qp->rx_vq.q_storage); 886 } 887 rte_free(qp); 888 889 dev->data->queue_pairs[queue_pair_id] = NULL; 890 891 return 0; 892 } 893 894 /** Setup a queue pair */ 895 static int 896 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, 897 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 898 __rte_unused int socket_id, 899 __rte_unused struct rte_mempool *session_pool) 900 { 901 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 902 struct dpaa2_sec_qp *qp; 903 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 904 struct dpseci_rx_queue_cfg cfg; 905 int32_t retcode; 906 907 PMD_INIT_FUNC_TRACE(); 908 909 /* If qp is already in use free ring memory and qp metadata. */ 910 if (dev->data->queue_pairs[qp_id] != NULL) { 911 PMD_DRV_LOG(INFO, "QP already setup"); 912 return 0; 913 } 914 915 PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p", 916 dev, qp_id, qp_conf); 917 918 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); 919 920 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), 921 RTE_CACHE_LINE_SIZE); 922 if (!qp) { 923 RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n"); 924 return -1; 925 } 926 927 qp->rx_vq.dev = dev; 928 qp->tx_vq.dev = dev; 929 qp->rx_vq.q_storage = rte_malloc("sec dq storage", 930 sizeof(struct queue_storage_info_t), 931 RTE_CACHE_LINE_SIZE); 932 if (!qp->rx_vq.q_storage) { 933 RTE_LOG(ERR, PMD, "malloc failed for q_storage\n"); 934 return -1; 935 } 936 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); 937 938 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { 939 RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n"); 940 return -1; 941 } 942 943 dev->data->queue_pairs[qp_id] = qp; 944 945 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; 946 cfg.user_ctx = (uint64_t)(&qp->rx_vq); 947 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, 948 qp_id, &cfg); 949 return retcode; 950 } 951 952 /** Start queue pair */ 953 static int 954 dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev, 955 __rte_unused uint16_t queue_pair_id) 956 { 957 PMD_INIT_FUNC_TRACE(); 958 959 return 0; 960 } 961 962 /** Stop queue pair */ 963 static int 964 dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev, 965 __rte_unused uint16_t queue_pair_id) 966 { 967 PMD_INIT_FUNC_TRACE(); 968 969 return 0; 970 } 971 972 /** Return the number of allocated queue pairs */ 973 static uint32_t 974 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) 975 { 976 PMD_INIT_FUNC_TRACE(); 977 978 return dev->data->nb_queue_pairs; 979 } 980 981 /** Returns the size of the aesni gcm session structure */ 982 static unsigned int 983 dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused) 984 { 985 PMD_INIT_FUNC_TRACE(); 986 987 return sizeof(dpaa2_sec_session); 988 } 989 990 static int 991 dpaa2_sec_cipher_init(struct rte_cryptodev *dev, 992 struct rte_crypto_sym_xform *xform, 993 dpaa2_sec_session *session) 994 { 995 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 996 struct alginfo cipherdata; 997 int bufsize, i; 998 struct ctxt_priv *priv; 999 struct sec_flow_context *flc; 1000 1001 PMD_INIT_FUNC_TRACE(); 1002 1003 /* For SEC CIPHER only one descriptor is required. */ 1004 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1005 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1006 RTE_CACHE_LINE_SIZE); 1007 if (priv == NULL) { 1008 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); 1009 return -1; 1010 } 1011 1012 priv->fle_pool = dev_priv->fle_pool; 1013 1014 flc = &priv->flc_desc[0].flc; 1015 1016 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1017 RTE_CACHE_LINE_SIZE); 1018 if (session->cipher_key.data == NULL) { 1019 RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); 1020 rte_free(priv); 1021 return -1; 1022 } 1023 session->cipher_key.length = xform->cipher.key.length; 1024 1025 memcpy(session->cipher_key.data, xform->cipher.key.data, 1026 xform->cipher.key.length); 1027 cipherdata.key = (uint64_t)session->cipher_key.data; 1028 cipherdata.keylen = session->cipher_key.length; 1029 cipherdata.key_enc_flags = 0; 1030 cipherdata.key_type = RTA_DATA_IMM; 1031 1032 /* Set IV parameters */ 1033 session->iv.offset = xform->cipher.iv.offset; 1034 session->iv.length = xform->cipher.iv.length; 1035 1036 switch (xform->cipher.algo) { 1037 case RTE_CRYPTO_CIPHER_AES_CBC: 1038 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1039 cipherdata.algmode = OP_ALG_AAI_CBC; 1040 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1041 break; 1042 case RTE_CRYPTO_CIPHER_3DES_CBC: 1043 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1044 cipherdata.algmode = OP_ALG_AAI_CBC; 1045 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1046 break; 1047 case RTE_CRYPTO_CIPHER_AES_CTR: 1048 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1049 cipherdata.algmode = OP_ALG_AAI_CTR; 1050 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1051 break; 1052 case RTE_CRYPTO_CIPHER_3DES_CTR: 1053 case RTE_CRYPTO_CIPHER_AES_ECB: 1054 case RTE_CRYPTO_CIPHER_3DES_ECB: 1055 case RTE_CRYPTO_CIPHER_AES_XTS: 1056 case RTE_CRYPTO_CIPHER_AES_F8: 1057 case RTE_CRYPTO_CIPHER_ARC4: 1058 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1059 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1060 case RTE_CRYPTO_CIPHER_ZUC_EEA3: 1061 case RTE_CRYPTO_CIPHER_NULL: 1062 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", 1063 xform->cipher.algo); 1064 goto error_out; 1065 default: 1066 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", 1067 xform->cipher.algo); 1068 goto error_out; 1069 } 1070 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1071 DIR_ENC : DIR_DEC; 1072 1073 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, 1074 &cipherdata, NULL, session->iv.length, 1075 session->dir); 1076 if (bufsize < 0) { 1077 RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n"); 1078 goto error_out; 1079 } 1080 flc->dhr = 0; 1081 flc->bpv0 = 0x1; 1082 flc->mode_bits = 0x8000; 1083 1084 flc->word1_sdl = (uint8_t)bufsize; 1085 flc->word2_rflc_31_0 = lower_32_bits( 1086 (uint64_t)&(((struct dpaa2_sec_qp *) 1087 dev->data->queue_pairs[0])->rx_vq)); 1088 flc->word3_rflc_63_32 = upper_32_bits( 1089 (uint64_t)&(((struct dpaa2_sec_qp *) 1090 dev->data->queue_pairs[0])->rx_vq)); 1091 session->ctxt = priv; 1092 1093 for (i = 0; i < bufsize; i++) 1094 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", 1095 i, priv->flc_desc[0].desc[i]); 1096 1097 return 0; 1098 1099 error_out: 1100 rte_free(session->cipher_key.data); 1101 rte_free(priv); 1102 return -1; 1103 } 1104 1105 static int 1106 dpaa2_sec_auth_init(struct rte_cryptodev *dev, 1107 struct rte_crypto_sym_xform *xform, 1108 dpaa2_sec_session *session) 1109 { 1110 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1111 struct alginfo authdata; 1112 unsigned int bufsize, i; 1113 struct ctxt_priv *priv; 1114 struct sec_flow_context *flc; 1115 1116 PMD_INIT_FUNC_TRACE(); 1117 1118 /* For SEC AUTH three descriptors are required for various stages */ 1119 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1120 sizeof(struct ctxt_priv) + 3 * 1121 sizeof(struct sec_flc_desc), 1122 RTE_CACHE_LINE_SIZE); 1123 if (priv == NULL) { 1124 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); 1125 return -1; 1126 } 1127 1128 priv->fle_pool = dev_priv->fle_pool; 1129 flc = &priv->flc_desc[DESC_INITFINAL].flc; 1130 1131 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1132 RTE_CACHE_LINE_SIZE); 1133 if (session->auth_key.data == NULL) { 1134 RTE_LOG(ERR, PMD, "No Memory for auth key\n"); 1135 rte_free(priv); 1136 return -1; 1137 } 1138 session->auth_key.length = xform->auth.key.length; 1139 1140 memcpy(session->auth_key.data, xform->auth.key.data, 1141 xform->auth.key.length); 1142 authdata.key = (uint64_t)session->auth_key.data; 1143 authdata.keylen = session->auth_key.length; 1144 authdata.key_enc_flags = 0; 1145 authdata.key_type = RTA_DATA_IMM; 1146 1147 session->digest_length = xform->auth.digest_length; 1148 1149 switch (xform->auth.algo) { 1150 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1151 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1152 authdata.algmode = OP_ALG_AAI_HMAC; 1153 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1154 break; 1155 case RTE_CRYPTO_AUTH_MD5_HMAC: 1156 authdata.algtype = OP_ALG_ALGSEL_MD5; 1157 authdata.algmode = OP_ALG_AAI_HMAC; 1158 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1159 break; 1160 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1161 authdata.algtype = OP_ALG_ALGSEL_SHA256; 1162 authdata.algmode = OP_ALG_AAI_HMAC; 1163 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1164 break; 1165 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1166 authdata.algtype = OP_ALG_ALGSEL_SHA384; 1167 authdata.algmode = OP_ALG_AAI_HMAC; 1168 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1169 break; 1170 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1171 authdata.algtype = OP_ALG_ALGSEL_SHA512; 1172 authdata.algmode = OP_ALG_AAI_HMAC; 1173 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1174 break; 1175 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1176 authdata.algtype = OP_ALG_ALGSEL_SHA224; 1177 authdata.algmode = OP_ALG_AAI_HMAC; 1178 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 1179 break; 1180 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1181 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1182 case RTE_CRYPTO_AUTH_NULL: 1183 case RTE_CRYPTO_AUTH_SHA1: 1184 case RTE_CRYPTO_AUTH_SHA256: 1185 case RTE_CRYPTO_AUTH_SHA512: 1186 case RTE_CRYPTO_AUTH_SHA224: 1187 case RTE_CRYPTO_AUTH_SHA384: 1188 case RTE_CRYPTO_AUTH_MD5: 1189 case RTE_CRYPTO_AUTH_AES_GMAC: 1190 case RTE_CRYPTO_AUTH_KASUMI_F9: 1191 case RTE_CRYPTO_AUTH_AES_CMAC: 1192 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1193 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1194 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", 1195 xform->auth.algo); 1196 goto error_out; 1197 default: 1198 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", 1199 xform->auth.algo); 1200 goto error_out; 1201 } 1202 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1203 DIR_ENC : DIR_DEC; 1204 1205 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1206 1, 0, &authdata, !session->dir, 1207 session->digest_length); 1208 1209 flc->word1_sdl = (uint8_t)bufsize; 1210 flc->word2_rflc_31_0 = lower_32_bits( 1211 (uint64_t)&(((struct dpaa2_sec_qp *) 1212 dev->data->queue_pairs[0])->rx_vq)); 1213 flc->word3_rflc_63_32 = upper_32_bits( 1214 (uint64_t)&(((struct dpaa2_sec_qp *) 1215 dev->data->queue_pairs[0])->rx_vq)); 1216 session->ctxt = priv; 1217 for (i = 0; i < bufsize; i++) 1218 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", 1219 i, priv->flc_desc[DESC_INITFINAL].desc[i]); 1220 1221 1222 return 0; 1223 1224 error_out: 1225 rte_free(session->auth_key.data); 1226 rte_free(priv); 1227 return -1; 1228 } 1229 1230 static int 1231 dpaa2_sec_aead_init(struct rte_cryptodev *dev, 1232 struct rte_crypto_sym_xform *xform, 1233 dpaa2_sec_session *session) 1234 { 1235 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1236 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1237 struct alginfo aeaddata; 1238 unsigned int bufsize, i; 1239 struct ctxt_priv *priv; 1240 struct sec_flow_context *flc; 1241 struct rte_crypto_aead_xform *aead_xform = &xform->aead; 1242 int err; 1243 1244 PMD_INIT_FUNC_TRACE(); 1245 1246 /* Set IV parameters */ 1247 session->iv.offset = aead_xform->iv.offset; 1248 session->iv.length = aead_xform->iv.length; 1249 session->ctxt_type = DPAA2_SEC_AEAD; 1250 1251 /* For SEC AEAD only one descriptor is required */ 1252 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1253 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1254 RTE_CACHE_LINE_SIZE); 1255 if (priv == NULL) { 1256 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); 1257 return -1; 1258 } 1259 1260 priv->fle_pool = dev_priv->fle_pool; 1261 flc = &priv->flc_desc[0].flc; 1262 1263 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, 1264 RTE_CACHE_LINE_SIZE); 1265 if (session->aead_key.data == NULL && aead_xform->key.length > 0) { 1266 RTE_LOG(ERR, PMD, "No Memory for aead key\n"); 1267 rte_free(priv); 1268 return -1; 1269 } 1270 memcpy(session->aead_key.data, aead_xform->key.data, 1271 aead_xform->key.length); 1272 1273 session->digest_length = aead_xform->digest_length; 1274 session->aead_key.length = aead_xform->key.length; 1275 ctxt->auth_only_len = aead_xform->aad_length; 1276 1277 aeaddata.key = (uint64_t)session->aead_key.data; 1278 aeaddata.keylen = session->aead_key.length; 1279 aeaddata.key_enc_flags = 0; 1280 aeaddata.key_type = RTA_DATA_IMM; 1281 1282 switch (aead_xform->algo) { 1283 case RTE_CRYPTO_AEAD_AES_GCM: 1284 aeaddata.algtype = OP_ALG_ALGSEL_AES; 1285 aeaddata.algmode = OP_ALG_AAI_GCM; 1286 session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM; 1287 break; 1288 case RTE_CRYPTO_AEAD_AES_CCM: 1289 RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n", 1290 aead_xform->algo); 1291 goto error_out; 1292 default: 1293 RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n", 1294 aead_xform->algo); 1295 goto error_out; 1296 } 1297 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1298 DIR_ENC : DIR_DEC; 1299 1300 priv->flc_desc[0].desc[0] = aeaddata.keylen; 1301 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1302 MIN_JOB_DESC_SIZE, 1303 (unsigned int *)priv->flc_desc[0].desc, 1304 &priv->flc_desc[0].desc[1], 1); 1305 1306 if (err < 0) { 1307 PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); 1308 goto error_out; 1309 } 1310 if (priv->flc_desc[0].desc[1] & 1) { 1311 aeaddata.key_type = RTA_DATA_IMM; 1312 } else { 1313 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key); 1314 aeaddata.key_type = RTA_DATA_PTR; 1315 } 1316 priv->flc_desc[0].desc[0] = 0; 1317 priv->flc_desc[0].desc[1] = 0; 1318 1319 if (session->dir == DIR_ENC) 1320 bufsize = cnstr_shdsc_gcm_encap( 1321 priv->flc_desc[0].desc, 1, 0, 1322 &aeaddata, session->iv.length, 1323 session->digest_length); 1324 else 1325 bufsize = cnstr_shdsc_gcm_decap( 1326 priv->flc_desc[0].desc, 1, 0, 1327 &aeaddata, session->iv.length, 1328 session->digest_length); 1329 flc->word1_sdl = (uint8_t)bufsize; 1330 flc->word2_rflc_31_0 = lower_32_bits( 1331 (uint64_t)&(((struct dpaa2_sec_qp *) 1332 dev->data->queue_pairs[0])->rx_vq)); 1333 flc->word3_rflc_63_32 = upper_32_bits( 1334 (uint64_t)&(((struct dpaa2_sec_qp *) 1335 dev->data->queue_pairs[0])->rx_vq)); 1336 session->ctxt = priv; 1337 for (i = 0; i < bufsize; i++) 1338 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", 1339 i, priv->flc_desc[0].desc[i]); 1340 1341 return 0; 1342 1343 error_out: 1344 rte_free(session->aead_key.data); 1345 rte_free(priv); 1346 return -1; 1347 } 1348 1349 1350 static int 1351 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, 1352 struct rte_crypto_sym_xform *xform, 1353 dpaa2_sec_session *session) 1354 { 1355 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; 1356 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; 1357 struct alginfo authdata, cipherdata; 1358 unsigned int bufsize, i; 1359 struct ctxt_priv *priv; 1360 struct sec_flow_context *flc; 1361 struct rte_crypto_cipher_xform *cipher_xform; 1362 struct rte_crypto_auth_xform *auth_xform; 1363 int err; 1364 1365 PMD_INIT_FUNC_TRACE(); 1366 1367 if (session->ext_params.aead_ctxt.auth_cipher_text) { 1368 cipher_xform = &xform->cipher; 1369 auth_xform = &xform->next->auth; 1370 session->ctxt_type = 1371 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1372 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER; 1373 } else { 1374 cipher_xform = &xform->next->cipher; 1375 auth_xform = &xform->auth; 1376 session->ctxt_type = 1377 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1378 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH; 1379 } 1380 1381 /* Set IV parameters */ 1382 session->iv.offset = cipher_xform->iv.offset; 1383 session->iv.length = cipher_xform->iv.length; 1384 1385 /* For SEC AEAD only one descriptor is required */ 1386 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1387 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), 1388 RTE_CACHE_LINE_SIZE); 1389 if (priv == NULL) { 1390 RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); 1391 return -1; 1392 } 1393 1394 priv->fle_pool = dev_priv->fle_pool; 1395 flc = &priv->flc_desc[0].flc; 1396 1397 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, 1398 RTE_CACHE_LINE_SIZE); 1399 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { 1400 RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); 1401 rte_free(priv); 1402 return -1; 1403 } 1404 session->cipher_key.length = cipher_xform->key.length; 1405 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, 1406 RTE_CACHE_LINE_SIZE); 1407 if (session->auth_key.data == NULL && auth_xform->key.length > 0) { 1408 RTE_LOG(ERR, PMD, "No Memory for auth key\n"); 1409 rte_free(session->cipher_key.data); 1410 rte_free(priv); 1411 return -1; 1412 } 1413 session->auth_key.length = auth_xform->key.length; 1414 memcpy(session->cipher_key.data, cipher_xform->key.data, 1415 cipher_xform->key.length); 1416 memcpy(session->auth_key.data, auth_xform->key.data, 1417 auth_xform->key.length); 1418 1419 authdata.key = (uint64_t)session->auth_key.data; 1420 authdata.keylen = session->auth_key.length; 1421 authdata.key_enc_flags = 0; 1422 authdata.key_type = RTA_DATA_IMM; 1423 1424 session->digest_length = auth_xform->digest_length; 1425 1426 switch (auth_xform->algo) { 1427 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1428 authdata.algtype = OP_ALG_ALGSEL_SHA1; 1429 authdata.algmode = OP_ALG_AAI_HMAC; 1430 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1431 break; 1432 case RTE_CRYPTO_AUTH_MD5_HMAC: 1433 authdata.algtype = OP_ALG_ALGSEL_MD5; 1434 authdata.algmode = OP_ALG_AAI_HMAC; 1435 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1436 break; 1437 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1438 authdata.algtype = OP_ALG_ALGSEL_SHA224; 1439 authdata.algmode = OP_ALG_AAI_HMAC; 1440 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC; 1441 break; 1442 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1443 authdata.algtype = OP_ALG_ALGSEL_SHA256; 1444 authdata.algmode = OP_ALG_AAI_HMAC; 1445 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1446 break; 1447 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1448 authdata.algtype = OP_ALG_ALGSEL_SHA384; 1449 authdata.algmode = OP_ALG_AAI_HMAC; 1450 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1451 break; 1452 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1453 authdata.algtype = OP_ALG_ALGSEL_SHA512; 1454 authdata.algmode = OP_ALG_AAI_HMAC; 1455 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1456 break; 1457 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1458 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1459 case RTE_CRYPTO_AUTH_NULL: 1460 case RTE_CRYPTO_AUTH_SHA1: 1461 case RTE_CRYPTO_AUTH_SHA256: 1462 case RTE_CRYPTO_AUTH_SHA512: 1463 case RTE_CRYPTO_AUTH_SHA224: 1464 case RTE_CRYPTO_AUTH_SHA384: 1465 case RTE_CRYPTO_AUTH_MD5: 1466 case RTE_CRYPTO_AUTH_AES_GMAC: 1467 case RTE_CRYPTO_AUTH_KASUMI_F9: 1468 case RTE_CRYPTO_AUTH_AES_CMAC: 1469 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1470 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1471 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", 1472 auth_xform->algo); 1473 goto error_out; 1474 default: 1475 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", 1476 auth_xform->algo); 1477 goto error_out; 1478 } 1479 cipherdata.key = (uint64_t)session->cipher_key.data; 1480 cipherdata.keylen = session->cipher_key.length; 1481 cipherdata.key_enc_flags = 0; 1482 cipherdata.key_type = RTA_DATA_IMM; 1483 1484 switch (cipher_xform->algo) { 1485 case RTE_CRYPTO_CIPHER_AES_CBC: 1486 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1487 cipherdata.algmode = OP_ALG_AAI_CBC; 1488 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1489 break; 1490 case RTE_CRYPTO_CIPHER_3DES_CBC: 1491 cipherdata.algtype = OP_ALG_ALGSEL_3DES; 1492 cipherdata.algmode = OP_ALG_AAI_CBC; 1493 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1494 break; 1495 case RTE_CRYPTO_CIPHER_AES_CTR: 1496 cipherdata.algtype = OP_ALG_ALGSEL_AES; 1497 cipherdata.algmode = OP_ALG_AAI_CTR; 1498 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1499 break; 1500 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1501 case RTE_CRYPTO_CIPHER_NULL: 1502 case RTE_CRYPTO_CIPHER_3DES_ECB: 1503 case RTE_CRYPTO_CIPHER_AES_ECB: 1504 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1505 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", 1506 cipher_xform->algo); 1507 goto error_out; 1508 default: 1509 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", 1510 cipher_xform->algo); 1511 goto error_out; 1512 } 1513 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1514 DIR_ENC : DIR_DEC; 1515 1516 priv->flc_desc[0].desc[0] = cipherdata.keylen; 1517 priv->flc_desc[0].desc[1] = authdata.keylen; 1518 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 1519 MIN_JOB_DESC_SIZE, 1520 (unsigned int *)priv->flc_desc[0].desc, 1521 &priv->flc_desc[0].desc[2], 2); 1522 1523 if (err < 0) { 1524 PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); 1525 goto error_out; 1526 } 1527 if (priv->flc_desc[0].desc[2] & 1) { 1528 cipherdata.key_type = RTA_DATA_IMM; 1529 } else { 1530 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key); 1531 cipherdata.key_type = RTA_DATA_PTR; 1532 } 1533 if (priv->flc_desc[0].desc[2] & (1 << 1)) { 1534 authdata.key_type = RTA_DATA_IMM; 1535 } else { 1536 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key); 1537 authdata.key_type = RTA_DATA_PTR; 1538 } 1539 priv->flc_desc[0].desc[0] = 0; 1540 priv->flc_desc[0].desc[1] = 0; 1541 priv->flc_desc[0].desc[2] = 0; 1542 1543 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) { 1544 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1, 1545 0, &cipherdata, &authdata, 1546 session->iv.length, 1547 ctxt->auth_only_len, 1548 session->digest_length, 1549 session->dir); 1550 } else { 1551 RTE_LOG(ERR, PMD, "Hash before cipher not supported\n"); 1552 goto error_out; 1553 } 1554 1555 flc->word1_sdl = (uint8_t)bufsize; 1556 flc->word2_rflc_31_0 = lower_32_bits( 1557 (uint64_t)&(((struct dpaa2_sec_qp *) 1558 dev->data->queue_pairs[0])->rx_vq)); 1559 flc->word3_rflc_63_32 = upper_32_bits( 1560 (uint64_t)&(((struct dpaa2_sec_qp *) 1561 dev->data->queue_pairs[0])->rx_vq)); 1562 session->ctxt = priv; 1563 for (i = 0; i < bufsize; i++) 1564 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", 1565 i, priv->flc_desc[0].desc[i]); 1566 1567 return 0; 1568 1569 error_out: 1570 rte_free(session->cipher_key.data); 1571 rte_free(session->auth_key.data); 1572 rte_free(priv); 1573 return -1; 1574 } 1575 1576 static int 1577 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, 1578 struct rte_crypto_sym_xform *xform, void *sess) 1579 { 1580 dpaa2_sec_session *session = sess; 1581 1582 PMD_INIT_FUNC_TRACE(); 1583 1584 if (unlikely(sess == NULL)) { 1585 RTE_LOG(ERR, PMD, "invalid session struct\n"); 1586 return -1; 1587 } 1588 1589 /* Default IV length = 0 */ 1590 session->iv.length = 0; 1591 1592 /* Cipher Only */ 1593 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 1594 session->ctxt_type = DPAA2_SEC_CIPHER; 1595 dpaa2_sec_cipher_init(dev, xform, session); 1596 1597 /* Authentication Only */ 1598 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 1599 xform->next == NULL) { 1600 session->ctxt_type = DPAA2_SEC_AUTH; 1601 dpaa2_sec_auth_init(dev, xform, session); 1602 1603 /* Cipher then Authenticate */ 1604 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 1605 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 1606 session->ext_params.aead_ctxt.auth_cipher_text = true; 1607 dpaa2_sec_aead_chain_init(dev, xform, session); 1608 1609 /* Authenticate then Cipher */ 1610 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 1611 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 1612 session->ext_params.aead_ctxt.auth_cipher_text = false; 1613 dpaa2_sec_aead_chain_init(dev, xform, session); 1614 1615 /* AEAD operation for AES-GCM kind of Algorithms */ 1616 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 1617 xform->next == NULL) { 1618 dpaa2_sec_aead_init(dev, xform, session); 1619 1620 } else { 1621 RTE_LOG(ERR, PMD, "Invalid crypto type\n"); 1622 return -EINVAL; 1623 } 1624 1625 return 0; 1626 } 1627 1628 static int 1629 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 1630 struct rte_security_session_conf *conf, 1631 void *sess) 1632 { 1633 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 1634 struct rte_crypto_auth_xform *auth_xform; 1635 struct rte_crypto_cipher_xform *cipher_xform; 1636 dpaa2_sec_session *session = (dpaa2_sec_session *)sess; 1637 struct ctxt_priv *priv; 1638 struct ipsec_encap_pdb encap_pdb; 1639 struct ipsec_decap_pdb decap_pdb; 1640 struct alginfo authdata, cipherdata; 1641 unsigned int bufsize; 1642 struct sec_flow_context *flc; 1643 1644 PMD_INIT_FUNC_TRACE(); 1645 1646 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1647 cipher_xform = &conf->crypto_xform->cipher; 1648 auth_xform = &conf->crypto_xform->next->auth; 1649 } else { 1650 auth_xform = &conf->crypto_xform->auth; 1651 cipher_xform = &conf->crypto_xform->next->cipher; 1652 } 1653 priv = (struct ctxt_priv *)rte_zmalloc(NULL, 1654 sizeof(struct ctxt_priv) + 1655 sizeof(struct sec_flc_desc), 1656 RTE_CACHE_LINE_SIZE); 1657 1658 if (priv == NULL) { 1659 RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT"); 1660 return -ENOMEM; 1661 } 1662 1663 flc = &priv->flc_desc[0].flc; 1664 1665 session->ctxt_type = DPAA2_SEC_IPSEC; 1666 session->cipher_key.data = rte_zmalloc(NULL, 1667 cipher_xform->key.length, 1668 RTE_CACHE_LINE_SIZE); 1669 if (session->cipher_key.data == NULL && 1670 cipher_xform->key.length > 0) { 1671 RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); 1672 rte_free(priv); 1673 return -ENOMEM; 1674 } 1675 1676 session->cipher_key.length = cipher_xform->key.length; 1677 session->auth_key.data = rte_zmalloc(NULL, 1678 auth_xform->key.length, 1679 RTE_CACHE_LINE_SIZE); 1680 if (session->auth_key.data == NULL && 1681 auth_xform->key.length > 0) { 1682 RTE_LOG(ERR, PMD, "No Memory for auth key\n"); 1683 rte_free(session->cipher_key.data); 1684 rte_free(priv); 1685 return -ENOMEM; 1686 } 1687 session->auth_key.length = auth_xform->key.length; 1688 memcpy(session->cipher_key.data, cipher_xform->key.data, 1689 cipher_xform->key.length); 1690 memcpy(session->auth_key.data, auth_xform->key.data, 1691 auth_xform->key.length); 1692 1693 authdata.key = (uint64_t)session->auth_key.data; 1694 authdata.keylen = session->auth_key.length; 1695 authdata.key_enc_flags = 0; 1696 authdata.key_type = RTA_DATA_IMM; 1697 switch (auth_xform->algo) { 1698 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1699 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96; 1700 authdata.algmode = OP_ALG_AAI_HMAC; 1701 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1702 break; 1703 case RTE_CRYPTO_AUTH_MD5_HMAC: 1704 authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96; 1705 authdata.algmode = OP_ALG_AAI_HMAC; 1706 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1707 break; 1708 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1709 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128; 1710 authdata.algmode = OP_ALG_AAI_HMAC; 1711 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1712 break; 1713 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1714 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192; 1715 authdata.algmode = OP_ALG_AAI_HMAC; 1716 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1717 break; 1718 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1719 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256; 1720 authdata.algmode = OP_ALG_AAI_HMAC; 1721 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1722 break; 1723 case RTE_CRYPTO_AUTH_AES_CMAC: 1724 authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96; 1725 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 1726 break; 1727 case RTE_CRYPTO_AUTH_NULL: 1728 authdata.algtype = OP_PCL_IPSEC_HMAC_NULL; 1729 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 1730 break; 1731 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1732 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1733 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1734 case RTE_CRYPTO_AUTH_SHA1: 1735 case RTE_CRYPTO_AUTH_SHA256: 1736 case RTE_CRYPTO_AUTH_SHA512: 1737 case RTE_CRYPTO_AUTH_SHA224: 1738 case RTE_CRYPTO_AUTH_SHA384: 1739 case RTE_CRYPTO_AUTH_MD5: 1740 case RTE_CRYPTO_AUTH_AES_GMAC: 1741 case RTE_CRYPTO_AUTH_KASUMI_F9: 1742 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1743 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1744 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", 1745 auth_xform->algo); 1746 goto out; 1747 default: 1748 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", 1749 auth_xform->algo); 1750 goto out; 1751 } 1752 cipherdata.key = (uint64_t)session->cipher_key.data; 1753 cipherdata.keylen = session->cipher_key.length; 1754 cipherdata.key_enc_flags = 0; 1755 cipherdata.key_type = RTA_DATA_IMM; 1756 1757 switch (cipher_xform->algo) { 1758 case RTE_CRYPTO_CIPHER_AES_CBC: 1759 cipherdata.algtype = OP_PCL_IPSEC_AES_CBC; 1760 cipherdata.algmode = OP_ALG_AAI_CBC; 1761 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1762 break; 1763 case RTE_CRYPTO_CIPHER_3DES_CBC: 1764 cipherdata.algtype = OP_PCL_IPSEC_3DES; 1765 cipherdata.algmode = OP_ALG_AAI_CBC; 1766 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1767 break; 1768 case RTE_CRYPTO_CIPHER_AES_CTR: 1769 cipherdata.algtype = OP_PCL_IPSEC_AES_CTR; 1770 cipherdata.algmode = OP_ALG_AAI_CTR; 1771 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1772 break; 1773 case RTE_CRYPTO_CIPHER_NULL: 1774 cipherdata.algtype = OP_PCL_IPSEC_NULL; 1775 break; 1776 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1777 case RTE_CRYPTO_CIPHER_3DES_ECB: 1778 case RTE_CRYPTO_CIPHER_AES_ECB: 1779 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1780 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", 1781 cipher_xform->algo); 1782 goto out; 1783 default: 1784 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", 1785 cipher_xform->algo); 1786 goto out; 1787 } 1788 1789 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1790 struct ip ip4_hdr; 1791 1792 flc->dhr = SEC_FLC_DHR_OUTBOUND; 1793 ip4_hdr.ip_v = IPVERSION; 1794 ip4_hdr.ip_hl = 5; 1795 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr)); 1796 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 1797 ip4_hdr.ip_id = 0; 1798 ip4_hdr.ip_off = 0; 1799 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 1800 ip4_hdr.ip_p = 0x32; 1801 ip4_hdr.ip_sum = 0; 1802 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 1803 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 1804 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr, 1805 sizeof(struct ip)); 1806 1807 /* For Sec Proto only one descriptor is required. */ 1808 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb)); 1809 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 1810 PDBOPTS_ESP_OIHI_PDB_INL | 1811 PDBOPTS_ESP_IVSRC | 1812 PDBHMO_ESP_ENCAP_DTTL; 1813 encap_pdb.spi = ipsec_xform->spi; 1814 encap_pdb.ip_hdr_len = sizeof(struct ip); 1815 1816 session->dir = DIR_ENC; 1817 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, 1818 1, 0, &encap_pdb, 1819 (uint8_t *)&ip4_hdr, 1820 &cipherdata, &authdata); 1821 } else if (ipsec_xform->direction == 1822 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 1823 flc->dhr = SEC_FLC_DHR_INBOUND; 1824 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 1825 decap_pdb.options = sizeof(struct ip) << 16; 1826 session->dir = DIR_DEC; 1827 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc, 1828 1, 0, &decap_pdb, &cipherdata, &authdata); 1829 } else 1830 goto out; 1831 flc->word1_sdl = (uint8_t)bufsize; 1832 1833 /* Enable the stashing control bit */ 1834 DPAA2_SET_FLC_RSC(flc); 1835 flc->word2_rflc_31_0 = lower_32_bits( 1836 (uint64_t)&(((struct dpaa2_sec_qp *) 1837 dev->data->queue_pairs[0])->rx_vq) | 0x14); 1838 flc->word3_rflc_63_32 = upper_32_bits( 1839 (uint64_t)&(((struct dpaa2_sec_qp *) 1840 dev->data->queue_pairs[0])->rx_vq)); 1841 1842 /* Set EWS bit i.e. enable write-safe */ 1843 DPAA2_SET_FLC_EWS(flc); 1844 /* Set BS = 1 i.e reuse input buffers as output buffers */ 1845 DPAA2_SET_FLC_REUSE_BS(flc); 1846 /* Set FF = 10; reuse input buffers if they provide sufficient space */ 1847 DPAA2_SET_FLC_REUSE_FF(flc); 1848 1849 session->ctxt = priv; 1850 1851 return 0; 1852 out: 1853 rte_free(session->auth_key.data); 1854 rte_free(session->cipher_key.data); 1855 rte_free(priv); 1856 return -1; 1857 } 1858 1859 static int 1860 dpaa2_sec_security_session_create(void *dev, 1861 struct rte_security_session_conf *conf, 1862 struct rte_security_session *sess, 1863 struct rte_mempool *mempool) 1864 { 1865 void *sess_private_data; 1866 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 1867 int ret; 1868 1869 if (rte_mempool_get(mempool, &sess_private_data)) { 1870 CDEV_LOG_ERR( 1871 "Couldn't get object from session mempool"); 1872 return -ENOMEM; 1873 } 1874 1875 switch (conf->protocol) { 1876 case RTE_SECURITY_PROTOCOL_IPSEC: 1877 ret = dpaa2_sec_set_ipsec_session(cdev, conf, 1878 sess_private_data); 1879 break; 1880 case RTE_SECURITY_PROTOCOL_MACSEC: 1881 return -ENOTSUP; 1882 default: 1883 return -EINVAL; 1884 } 1885 if (ret != 0) { 1886 PMD_DRV_LOG(ERR, 1887 "DPAA2 PMD: failed to configure session parameters"); 1888 1889 /* Return session to mempool */ 1890 rte_mempool_put(mempool, sess_private_data); 1891 return ret; 1892 } 1893 1894 set_sec_session_private_data(sess, sess_private_data); 1895 1896 return ret; 1897 } 1898 1899 /** Clear the memory of session so it doesn't leave key material behind */ 1900 static int 1901 dpaa2_sec_security_session_destroy(void *dev __rte_unused, 1902 struct rte_security_session *sess) 1903 { 1904 PMD_INIT_FUNC_TRACE(); 1905 void *sess_priv = get_sec_session_private_data(sess); 1906 1907 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 1908 1909 if (sess_priv) { 1910 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 1911 1912 rte_free(s->ctxt); 1913 rte_free(s->cipher_key.data); 1914 rte_free(s->auth_key.data); 1915 memset(sess, 0, sizeof(dpaa2_sec_session)); 1916 set_sec_session_private_data(sess, NULL); 1917 rte_mempool_put(sess_mp, sess_priv); 1918 } 1919 return 0; 1920 } 1921 1922 static int 1923 dpaa2_sec_session_configure(struct rte_cryptodev *dev, 1924 struct rte_crypto_sym_xform *xform, 1925 struct rte_cryptodev_sym_session *sess, 1926 struct rte_mempool *mempool) 1927 { 1928 void *sess_private_data; 1929 int ret; 1930 1931 if (rte_mempool_get(mempool, &sess_private_data)) { 1932 CDEV_LOG_ERR( 1933 "Couldn't get object from session mempool"); 1934 return -ENOMEM; 1935 } 1936 1937 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); 1938 if (ret != 0) { 1939 PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure " 1940 "session parameters"); 1941 1942 /* Return session to mempool */ 1943 rte_mempool_put(mempool, sess_private_data); 1944 return ret; 1945 } 1946 1947 set_session_private_data(sess, dev->driver_id, 1948 sess_private_data); 1949 1950 return 0; 1951 } 1952 1953 /** Clear the memory of session so it doesn't leave key material behind */ 1954 static void 1955 dpaa2_sec_session_clear(struct rte_cryptodev *dev, 1956 struct rte_cryptodev_sym_session *sess) 1957 { 1958 PMD_INIT_FUNC_TRACE(); 1959 uint8_t index = dev->driver_id; 1960 void *sess_priv = get_session_private_data(sess, index); 1961 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; 1962 1963 if (sess_priv) { 1964 rte_free(s->ctxt); 1965 rte_free(s->cipher_key.data); 1966 rte_free(s->auth_key.data); 1967 memset(sess, 0, sizeof(dpaa2_sec_session)); 1968 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 1969 set_session_private_data(sess, index, NULL); 1970 rte_mempool_put(sess_mp, sess_priv); 1971 } 1972 } 1973 1974 static int 1975 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, 1976 struct rte_cryptodev_config *config __rte_unused) 1977 { 1978 PMD_INIT_FUNC_TRACE(); 1979 1980 return 0; 1981 } 1982 1983 static int 1984 dpaa2_sec_dev_start(struct rte_cryptodev *dev) 1985 { 1986 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1987 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1988 struct dpseci_attr attr; 1989 struct dpaa2_queue *dpaa2_q; 1990 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 1991 dev->data->queue_pairs; 1992 struct dpseci_rx_queue_attr rx_attr; 1993 struct dpseci_tx_queue_attr tx_attr; 1994 int ret, i; 1995 1996 PMD_INIT_FUNC_TRACE(); 1997 1998 memset(&attr, 0, sizeof(struct dpseci_attr)); 1999 2000 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); 2001 if (ret) { 2002 PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n", 2003 priv->hw_id); 2004 goto get_attr_failure; 2005 } 2006 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); 2007 if (ret) { 2008 PMD_INIT_LOG(ERR, 2009 "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n"); 2010 goto get_attr_failure; 2011 } 2012 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { 2013 dpaa2_q = &qp[i]->rx_vq; 2014 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2015 &rx_attr); 2016 dpaa2_q->fqid = rx_attr.fqid; 2017 PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid); 2018 } 2019 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { 2020 dpaa2_q = &qp[i]->tx_vq; 2021 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, 2022 &tx_attr); 2023 dpaa2_q->fqid = tx_attr.fqid; 2024 PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid); 2025 } 2026 2027 return 0; 2028 get_attr_failure: 2029 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2030 return -1; 2031 } 2032 2033 static void 2034 dpaa2_sec_dev_stop(struct rte_cryptodev *dev) 2035 { 2036 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2037 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2038 int ret; 2039 2040 PMD_INIT_FUNC_TRACE(); 2041 2042 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); 2043 if (ret) { 2044 PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device", 2045 priv->hw_id); 2046 return; 2047 } 2048 2049 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); 2050 if (ret < 0) { 2051 PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n", 2052 ret); 2053 return; 2054 } 2055 } 2056 2057 static int 2058 dpaa2_sec_dev_close(struct rte_cryptodev *dev) 2059 { 2060 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2061 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2062 int ret; 2063 2064 PMD_INIT_FUNC_TRACE(); 2065 2066 /* Function is reverse of dpaa2_sec_dev_init. 2067 * It does the following: 2068 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id 2069 * 2. Close the DPSECI device 2070 * 3. Free the allocated resources. 2071 */ 2072 2073 /*Close the device at underlying layer*/ 2074 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 2075 if (ret) { 2076 PMD_INIT_LOG(ERR, "Failure closing dpseci device with" 2077 " error code %d\n", ret); 2078 return -1; 2079 } 2080 2081 /*Free the allocated memory for ethernet private data and dpseci*/ 2082 priv->hw = NULL; 2083 rte_free(dpseci); 2084 2085 return 0; 2086 } 2087 2088 static void 2089 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 2090 struct rte_cryptodev_info *info) 2091 { 2092 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 2093 2094 PMD_INIT_FUNC_TRACE(); 2095 if (info != NULL) { 2096 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 2097 info->feature_flags = dev->feature_flags; 2098 info->capabilities = dpaa2_sec_capabilities; 2099 info->sym.max_nb_sessions = internals->max_nb_sessions; 2100 info->driver_id = cryptodev_driver_id; 2101 } 2102 } 2103 2104 static 2105 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 2106 struct rte_cryptodev_stats *stats) 2107 { 2108 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 2109 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 2110 struct dpseci_sec_counters counters = {0}; 2111 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2112 dev->data->queue_pairs; 2113 int ret, i; 2114 2115 PMD_INIT_FUNC_TRACE(); 2116 if (stats == NULL) { 2117 PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); 2118 return; 2119 } 2120 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 2121 if (qp[i] == NULL) { 2122 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 2123 continue; 2124 } 2125 2126 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 2127 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 2128 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 2129 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 2130 } 2131 2132 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 2133 &counters); 2134 if (ret) { 2135 PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n"); 2136 } else { 2137 PMD_DRV_LOG(INFO, "dpseci hw stats:" 2138 "\n\tNumber of Requests Dequeued = %lu" 2139 "\n\tNumber of Outbound Encrypt Requests = %lu" 2140 "\n\tNumber of Inbound Decrypt Requests = %lu" 2141 "\n\tNumber of Outbound Bytes Encrypted = %lu" 2142 "\n\tNumber of Outbound Bytes Protected = %lu" 2143 "\n\tNumber of Inbound Bytes Decrypted = %lu" 2144 "\n\tNumber of Inbound Bytes Validated = %lu", 2145 counters.dequeued_requests, 2146 counters.ob_enc_requests, 2147 counters.ib_dec_requests, 2148 counters.ob_enc_bytes, 2149 counters.ob_prot_bytes, 2150 counters.ib_dec_bytes, 2151 counters.ib_valid_bytes); 2152 } 2153 } 2154 2155 static 2156 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 2157 { 2158 int i; 2159 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 2160 (dev->data->queue_pairs); 2161 2162 PMD_INIT_FUNC_TRACE(); 2163 2164 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 2165 if (qp[i] == NULL) { 2166 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 2167 continue; 2168 } 2169 qp[i]->tx_vq.rx_pkts = 0; 2170 qp[i]->tx_vq.tx_pkts = 0; 2171 qp[i]->tx_vq.err_pkts = 0; 2172 qp[i]->rx_vq.rx_pkts = 0; 2173 qp[i]->rx_vq.tx_pkts = 0; 2174 qp[i]->rx_vq.err_pkts = 0; 2175 } 2176 } 2177 2178 static struct rte_cryptodev_ops crypto_ops = { 2179 .dev_configure = dpaa2_sec_dev_configure, 2180 .dev_start = dpaa2_sec_dev_start, 2181 .dev_stop = dpaa2_sec_dev_stop, 2182 .dev_close = dpaa2_sec_dev_close, 2183 .dev_infos_get = dpaa2_sec_dev_infos_get, 2184 .stats_get = dpaa2_sec_stats_get, 2185 .stats_reset = dpaa2_sec_stats_reset, 2186 .queue_pair_setup = dpaa2_sec_queue_pair_setup, 2187 .queue_pair_release = dpaa2_sec_queue_pair_release, 2188 .queue_pair_start = dpaa2_sec_queue_pair_start, 2189 .queue_pair_stop = dpaa2_sec_queue_pair_stop, 2190 .queue_pair_count = dpaa2_sec_queue_pair_count, 2191 .session_get_size = dpaa2_sec_session_get_size, 2192 .session_configure = dpaa2_sec_session_configure, 2193 .session_clear = dpaa2_sec_session_clear, 2194 }; 2195 2196 static const struct rte_security_capability * 2197 dpaa2_sec_capabilities_get(void *device __rte_unused) 2198 { 2199 return dpaa2_sec_security_cap; 2200 } 2201 2202 struct rte_security_ops dpaa2_sec_security_ops = { 2203 .session_create = dpaa2_sec_security_session_create, 2204 .session_update = NULL, 2205 .session_stats_get = NULL, 2206 .session_destroy = dpaa2_sec_security_session_destroy, 2207 .set_pkt_metadata = NULL, 2208 .capabilities_get = dpaa2_sec_capabilities_get 2209 }; 2210 2211 static int 2212 dpaa2_sec_uninit(const struct rte_cryptodev *dev) 2213 { 2214 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 2215 2216 rte_free(dev->security_ctx); 2217 2218 rte_mempool_free(internals->fle_pool); 2219 2220 PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n", 2221 dev->data->name, rte_socket_id()); 2222 2223 return 0; 2224 } 2225 2226 static int 2227 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) 2228 { 2229 struct dpaa2_sec_dev_private *internals; 2230 struct rte_device *dev = cryptodev->device; 2231 struct rte_dpaa2_device *dpaa2_dev; 2232 struct rte_security_ctx *security_instance; 2233 struct fsl_mc_io *dpseci; 2234 uint16_t token; 2235 struct dpseci_attr attr; 2236 int retcode, hw_id; 2237 char str[20]; 2238 2239 PMD_INIT_FUNC_TRACE(); 2240 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 2241 if (dpaa2_dev == NULL) { 2242 PMD_INIT_LOG(ERR, "dpaa2_device not found\n"); 2243 return -1; 2244 } 2245 hw_id = dpaa2_dev->object_id; 2246 2247 cryptodev->driver_id = cryptodev_driver_id; 2248 cryptodev->dev_ops = &crypto_ops; 2249 2250 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst; 2251 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst; 2252 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2253 RTE_CRYPTODEV_FF_HW_ACCELERATED | 2254 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2255 RTE_CRYPTODEV_FF_SECURITY; 2256 2257 internals = cryptodev->data->dev_private; 2258 internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS; 2259 2260 /* 2261 * For secondary processes, we don't initialise any further as primary 2262 * has already done this work. Only check we don't need a different 2263 * RX function 2264 */ 2265 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2266 PMD_INIT_LOG(DEBUG, "Device already init by primary process"); 2267 return 0; 2268 } 2269 2270 /* Initialize security_ctx only for primary process*/ 2271 security_instance = rte_malloc("rte_security_instances_ops", 2272 sizeof(struct rte_security_ctx), 0); 2273 if (security_instance == NULL) 2274 return -ENOMEM; 2275 security_instance->device = (void *)cryptodev; 2276 security_instance->ops = &dpaa2_sec_security_ops; 2277 security_instance->sess_cnt = 0; 2278 cryptodev->security_ctx = security_instance; 2279 2280 /*Open the rte device via MC and save the handle for further use*/ 2281 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, 2282 sizeof(struct fsl_mc_io), 0); 2283 if (!dpseci) { 2284 PMD_INIT_LOG(ERR, 2285 "Error in allocating the memory for dpsec object"); 2286 return -1; 2287 } 2288 dpseci->regs = rte_mcp_ptr_list[0]; 2289 2290 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); 2291 if (retcode != 0) { 2292 PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x", 2293 retcode); 2294 goto init_error; 2295 } 2296 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); 2297 if (retcode != 0) { 2298 PMD_INIT_LOG(ERR, 2299 "Cannot get dpsec device attributed: Error = %x", 2300 retcode); 2301 goto init_error; 2302 } 2303 sprintf(cryptodev->data->name, "dpsec-%u", hw_id); 2304 2305 internals->max_nb_queue_pairs = attr.num_tx_queues; 2306 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs; 2307 internals->hw = dpseci; 2308 internals->token = token; 2309 2310 sprintf(str, "fle_pool_%d", cryptodev->data->dev_id); 2311 internals->fle_pool = rte_mempool_create((const char *)str, 2312 FLE_POOL_NUM_BUFS, 2313 FLE_POOL_BUF_SIZE, 2314 FLE_POOL_CACHE_SIZE, 0, 2315 NULL, NULL, NULL, NULL, 2316 SOCKET_ID_ANY, 0); 2317 if (!internals->fle_pool) { 2318 RTE_LOG(ERR, PMD, "%s create failed\n", str); 2319 goto init_error; 2320 } 2321 2322 PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name); 2323 return 0; 2324 2325 init_error: 2326 PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name); 2327 2328 /* dpaa2_sec_uninit(crypto_dev_name); */ 2329 return -EFAULT; 2330 } 2331 2332 static int 2333 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv, 2334 struct rte_dpaa2_device *dpaa2_dev) 2335 { 2336 struct rte_cryptodev *cryptodev; 2337 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; 2338 2339 int retval; 2340 2341 sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id); 2342 2343 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); 2344 if (cryptodev == NULL) 2345 return -ENOMEM; 2346 2347 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2348 cryptodev->data->dev_private = rte_zmalloc_socket( 2349 "cryptodev private structure", 2350 sizeof(struct dpaa2_sec_dev_private), 2351 RTE_CACHE_LINE_SIZE, 2352 rte_socket_id()); 2353 2354 if (cryptodev->data->dev_private == NULL) 2355 rte_panic("Cannot allocate memzone for private " 2356 "device data"); 2357 } 2358 2359 dpaa2_dev->cryptodev = cryptodev; 2360 cryptodev->device = &dpaa2_dev->device; 2361 cryptodev->device->driver = &dpaa2_drv->driver; 2362 2363 /* init user callbacks */ 2364 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 2365 2366 /* Invoke PMD device initialization function */ 2367 retval = dpaa2_sec_dev_init(cryptodev); 2368 if (retval == 0) 2369 return 0; 2370 2371 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2372 rte_free(cryptodev->data->dev_private); 2373 2374 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 2375 2376 return -ENXIO; 2377 } 2378 2379 static int 2380 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev) 2381 { 2382 struct rte_cryptodev *cryptodev; 2383 int ret; 2384 2385 cryptodev = dpaa2_dev->cryptodev; 2386 if (cryptodev == NULL) 2387 return -ENODEV; 2388 2389 ret = dpaa2_sec_uninit(cryptodev); 2390 if (ret) 2391 return ret; 2392 2393 return rte_cryptodev_pmd_destroy(cryptodev); 2394 } 2395 2396 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { 2397 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, 2398 .drv_type = DPAA2_CRYPTO, 2399 .driver = { 2400 .name = "DPAA2 SEC PMD" 2401 }, 2402 .probe = cryptodev_dpaa2_sec_probe, 2403 .remove = cryptodev_dpaa2_sec_remove, 2404 }; 2405 2406 static struct cryptodev_driver dpaa2_sec_crypto_drv; 2407 2408 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); 2409 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver, 2410 cryptodev_driver_id); 2411