/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER
 * and a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;
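
/*
 * Note on the frame layout used by the build_*_fd() helpers below.
 * This is an informal sketch derived from the pointer arithmetic in
 * those helpers, not an authoritative hardware description:
 *
 *	fle_pool element (FLE_POOL_BUF_SIZE bytes)
 *	+-------------+  <- fle: stores the rte_crypto_op address and the
 *	| ctxt FLE    |     ctxt_priv pointer (DPAA2_FLE_SAVE_CTXT); read
 *	+-------------+     back in sec_fd_to_mbuf() as fle[-1]
 *	| output FLE  |  <- fle + 1, this is what the FD points at
 *	+-------------+
 *	| input FLE   |  <- fle + 2
 *	+-------------+
 *	| SGEs ...    |  <- fle + 3 onwards: scatter/gather entries
 *	+-------------+     referenced by the two FLE lists
 */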
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the
	 * FD address to get the mbuf address from the previous FLE.
	 * Using the inline mbuf would be a better approach.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + sess->iv.length +
			 auth_only_len) :
			(sym_op->aead.data.length + sess->iv.length +
			 auth_only_len + sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		memset(sym_op->aead.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
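
/*
 * For the chained cipher+auth case below, auth_only_len is the length of
 * the authenticated-but-not-ciphered prefix, i.e. auth.data.length minus
 * cipher.data.length; the two regions are assumed to end at the same
 * place, with the auth region starting earlier than the cipher region.
 */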
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* We are using the first FLE entry to store the mbuf. Currently
	 * we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get
	 * the mbuf address from the previous FLE.
	 * Using the inline mbuf would be a better approach.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * Using the inline mbuf would be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * Using the inline mbuf would be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AEAD:
		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}
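
/*
 * Illustrative only (not part of this driver): an application reaches
 * the two burst functions below through the generic cryptodev API,
 * assuming a device "dev_id" whose queue pair 0 has been set up:
 *
 *	uint16_t nb_tx = rte_cryptodev_enqueue_burst(dev_id, 0,
 *						     ops, nb_ops);
 *	uint16_t nb_rx = rte_cryptodev_dequeue_burst(dev_id, 0,
 *						     ops, nb_tx);
 *
 * Those calls resolve to dpaa2_sec_enqueue_burst()/dequeue_burst()
 * through the function pointers installed in dpaa2_sec_dev_init().
 */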
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
				get_session_private_data(
						(*ops)->sym->session,
						cryptodev_driver_id);
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
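
/*
 * sec_fd_to_mbuf() undoes the bookkeeping done by the build_*_fd()
 * helpers: the FD points at the output FLE, so the rte_crypto_op and the
 * ctxt_priv pointer saved via DPAA2_FLE_SAVE_CTXT() are recovered from
 * the FLE just before it (fle - 1), which is also the address returned
 * to the FLE pool.
 */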
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf. Currently
	 * we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get
	 * the mbuf address from the previous FLE.
	 * Using the inline mbuf would be a better approach.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* Free the FLE memory */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}
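
/*
 * Dequeue uses a QBMAN volatile (pull-mode) dequeue: one pull command
 * requests at most DPAA2_DQRR_RING_SIZE frames, so a caller asking for
 * more than that per call only receives up to the ring-size worth of
 * results; the remainder is picked up by subsequent calls.
 */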
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD,
				"SEC VDQ command is not issued : QBMAN busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also, the SWP seems to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until dq_storage is updated with a new token by QBMAN */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and set
		 * the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for a valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
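
/*
 * Illustrative only: queue pair setup is normally driven from the
 * application side, e.g. (assuming "dev_id" and a populated qp_conf):
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				       rte_socket_id(), sess_pool);
 *
 * Note that qp_conf, socket_id and session_pool are currently ignored
 * by this PMD (they are marked __rte_unused below).
 */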
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, there is nothing more to do. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
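
/*
 * The dpaa2_sec_*_init() helpers below translate an rte_crypto_sym_xform
 * into a SEC shared descriptor, built by the RTA cnstr_shdsc_*() helpers
 * into the session's flow context (flc). word2/word3 of the flc carry
 * the address of queue pair 0's rx_vq so that completions can be
 * attributed back to the queue on dequeue.
 */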
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
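
/*
 * A note on the rta_inline_query() pattern used below: the key lengths
 * are written into the first descriptor words, and the helper reports
 * back (as a bitmask in the following word) which keys still fit inline
 * in the shared descriptor. Keys that do not fit are referenced by
 * pointer (RTA_DATA_PTR) instead of being embedded (RTA_DATA_IMM).
 */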
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for aead key\n");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform,
				 void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -1;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type\n");
		return -EINVAL;
	}

	return 0;
}
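
/*
 * Illustrative only: with the two-step symmetric session API this PMD is
 * written against, an application typically creates and attaches a
 * session as follows (assuming "dev_id", a session mempool "sess_mp"
 * and a populated xform chain):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);
 *
 * The init call lands in dpaa2_sec_session_configure() below.
 */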
static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,
			    struct rte_cryptodev_sym_session *sess,
			    struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
			struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}

static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer
	 *    pools, dpbp_id.
	 * 2. Close the DPSECI device.
	 * 3. Free the allocated resources.
	 */
1698 */ 1699 1700 /*Close the device at underlying layer*/ 1701 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 1702 if (ret) { 1703 PMD_INIT_LOG(ERR, "Failure closing dpseci device with" 1704 " error code %d\n", ret); 1705 return -1; 1706 } 1707 1708 /*Free the allocated memory for ethernet private data and dpseci*/ 1709 priv->hw = NULL; 1710 rte_free(dpseci); 1711 1712 return 0; 1713 } 1714 1715 static void 1716 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 1717 struct rte_cryptodev_info *info) 1718 { 1719 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 1720 1721 PMD_INIT_FUNC_TRACE(); 1722 if (info != NULL) { 1723 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 1724 info->feature_flags = dev->feature_flags; 1725 info->capabilities = dpaa2_sec_capabilities; 1726 info->sym.max_nb_sessions = internals->max_nb_sessions; 1727 info->driver_id = cryptodev_driver_id; 1728 } 1729 } 1730 1731 static 1732 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 1733 struct rte_cryptodev_stats *stats) 1734 { 1735 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1736 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1737 struct dpseci_sec_counters counters = {0}; 1738 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 1739 dev->data->queue_pairs; 1740 int ret, i; 1741 1742 PMD_INIT_FUNC_TRACE(); 1743 if (stats == NULL) { 1744 PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); 1745 return; 1746 } 1747 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 1748 if (qp[i] == NULL) { 1749 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 1750 continue; 1751 } 1752 1753 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 1754 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 1755 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 1756 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 1757 } 1758 1759 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 1760 &counters); 1761 if (ret) { 1762 PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n"); 1763 } else { 1764 PMD_DRV_LOG(INFO, "dpseci hw stats:" 1765 "\n\tNumber of Requests Dequeued = %lu" 1766 "\n\tNumber of Outbound Encrypt Requests = %lu" 1767 "\n\tNumber of Inbound Decrypt Requests = %lu" 1768 "\n\tNumber of Outbound Bytes Encrypted = %lu" 1769 "\n\tNumber of Outbound Bytes Protected = %lu" 1770 "\n\tNumber of Inbound Bytes Decrypted = %lu" 1771 "\n\tNumber of Inbound Bytes Validated = %lu", 1772 counters.dequeued_requests, 1773 counters.ob_enc_requests, 1774 counters.ib_dec_requests, 1775 counters.ob_enc_bytes, 1776 counters.ob_prot_bytes, 1777 counters.ib_dec_bytes, 1778 counters.ib_valid_bytes); 1779 } 1780 } 1781 1782 static 1783 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 1784 { 1785 int i; 1786 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 1787 (dev->data->queue_pairs); 1788 1789 PMD_INIT_FUNC_TRACE(); 1790 1791 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 1792 if (qp[i] == NULL) { 1793 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 1794 continue; 1795 } 1796 qp[i]->tx_vq.rx_pkts = 0; 1797 qp[i]->tx_vq.tx_pkts = 0; 1798 qp[i]->tx_vq.err_pkts = 0; 1799 qp[i]->rx_vq.rx_pkts = 0; 1800 qp[i]->rx_vq.tx_pkts = 0; 1801 qp[i]->rx_vq.err_pkts = 0; 1802 } 1803 } 1804 1805 static struct rte_cryptodev_ops crypto_ops = { 1806 .dev_configure = dpaa2_sec_dev_configure, 1807 .dev_start = dpaa2_sec_dev_start, 1808 .dev_stop = dpaa2_sec_dev_stop, 1809 .dev_close = dpaa2_sec_dev_close, 1810 .dev_infos_get = dpaa2_sec_dev_infos_get, 1811 .stats_get = dpaa2_sec_stats_get, 1812 .stats_reset = 
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_start = dpaa2_sec_queue_pair_start,
	.queue_pair_stop = dpaa2_sec_queue_pair_stop,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.session_get_size = dpaa2_sec_session_get_size,
	.session_configure = dpaa2_sec_session_configure,
	.session_clear = dpaa2_sec_session_clear,
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
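/*
 * One-time per-device initialisation: opens the DPSECI object through
 * the MC, publishes the burst enqueue/dequeue handlers and feature
 * flags, and creates the per-device FLE pool from which the datapath
 * allocates frame-list entries for every crypto operation.
 */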
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't
	 * need a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG,
			     "Device already initialised by primary process");
		return 0;
	}
	/* Open the DPSECI device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpseci object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpseci device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpseci device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create(str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}

static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialisation function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
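/*
 * Note: cryptodev_dpaa2_sec_probe() above and
 * cryptodev_dpaa2_sec_remove() below are not application entry points;
 * the fslmc bus invokes them (via the rte_dpaa2_driver registered at
 * the bottom of this file) once for every DPSECI object discovered
 * during the bus scan.
 */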
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
			       cryptodev_driver_id);
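/*
 * Illustrative usage (assuming a DPSECI object with id 0 has been
 * probed): an application locates the device by the name assigned in
 * dpaa2_sec_dev_init() and then drives it through the generic API, e.g.
 *
 *	int dev_id = rte_cryptodev_get_dev_id("dpsec-0");
 *
 *	if (dev_id >= 0)
 *		... configure/start as sketched above crypto_ops ...
 */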