/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the
	 * FD address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
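	/* For reference, a sketch of how one FLE-pool buffer is carved up
	 * by the construction below (summarizing this code, not a hardware
	 * contract):
	 *
	 *   entry 0  - bookkeeping only: op address and ctxt back-pointer,
	 *              recovered on dequeue by stepping one FLE back
	 *   entry 1  - output frame-list entry (SG) -> data (+ digest on
	 *              encrypt)
	 *   entry 2  - input frame-list entry (SG) -> IV, AAD, data
	 *              (+ ICV copy on decrypt)
	 *   entry 3+ - the scatter/gather entries themselves
	 */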
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sym_op->aead.digest.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);
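	/* The input list below hands SEC the IV, the AAD prefix (if any)
	 * and the payload; on decryption a private copy of the received
	 * ICV is appended so verification never touches the caller's
	 * digest buffer.
	 */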
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
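	/* Note: unlike the AEAD path, auth_only_len is derived above from
	 * the two data ranges: the bytes that are authenticated but not
	 * encrypted (auth range minus cipher range), i.e. the AAD-like
	 * prefix.
	 */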
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);
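	/* The FD length programmed for this compound frame is the total
	 * input SEC consumes: IV plus the authenticated range, plus the
	 * digest copy on the verify path (set further below for DIR_DEC).
	 */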
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
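	/* Auth-only layout (a sketch of the code below): the output FLE
	 * carries just the digest buffer; the input is the data to hash
	 * and, on verification, a private copy of the received digest
	 * appended via an SG list.
	 */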
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();
	/*
	 * Segmented buffer is not supported.
	 */
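	/* Rationale (not stated by the hardware docs, inferred from the
	 * builders above): each SGE addresses one contiguous region, so a
	 * chained (multi-segment) mbuf cannot be described and is rejected
	 * up front.
	 */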
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AEAD:
		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
					get_session_private_data(
					(*ops)->sym->session,
					cryptodev_driver_id);
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							   &fd_arr[loop],
							   frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we can go back one FLE from the FD address to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
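	/* This is the reverse of the enqueue-side layout: the FD points at
	 * FLE entry 1, so stepping back one entry recovers the
	 * rte_crypto_op address and the ctxt pointer saved in entry 0.
	 */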
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD,
				"SEC VDQ command is not issued : QBMAN busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the PULL command issued above.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Note that the SWP may be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the Last Pull command is expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, there is nothing more to do. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
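	/* Build the shared descriptor with the RTA helper and publish it
	 * via the flow context (summarizing the code below): word1 carries
	 * the descriptor length, words 2-3 carry the rx queue context
	 * address split into 32-bit halves.
	 */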
	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);


	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for aead key\n");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
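	/* Sketch of the inline-key query below: rta_inline_query() is fed
	 * the key length(s) through the leading descriptor words and
	 * reports back, as a bitmask in the following word, which keys fit
	 * inline in the descriptor; the rest must be referenced by pointer
	 * (RTA_DATA_PTR). The scratch words are cleared afterwards.
	 */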
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}


static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}
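	/* auth_cipher_text is set by dpaa2_sec_set_session_parameters()
	 * according to the xform chain order (cipher->auth vs
	 * auth->cipher); combined with the cipher direction above it
	 * resolves to DPAA2_SEC_CIPHER_HASH (encrypt then authenticate) or
	 * DPAA2_SEC_HASH_CIPHER (authenticate then decrypt).
	 */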
	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform,
				 void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -1;
	}
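	/* Dispatch on the xform chain below: a single CIPHER or AUTH
	 * xform, a CIPHER+AUTH / AUTH+CIPHER chain, or a standalone AEAD
	 * xform; each initializer builds the matching shared descriptor.
	 */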
	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type\n");
		return -EINVAL;
	}

	return 0;
}

static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,
			    struct rte_cryptodev_sym_session *sess,
			    struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
			struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;
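	/* Start sequence (a sketch of the flow below): enable the DPSECI
	 * object through the MC, read back its attributes, then cache the
	 * rx/tx FQIDs of every configured queue pair for the fast path.
	 */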
	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}

static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */
1701 */ 1702 1703 /*Close the device at underlying layer*/ 1704 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); 1705 if (ret) { 1706 PMD_INIT_LOG(ERR, "Failure closing dpseci device with" 1707 " error code %d\n", ret); 1708 return -1; 1709 } 1710 1711 /*Free the allocated memory for ethernet private data and dpseci*/ 1712 priv->hw = NULL; 1713 rte_free(dpseci); 1714 1715 return 0; 1716 } 1717 1718 static void 1719 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, 1720 struct rte_cryptodev_info *info) 1721 { 1722 struct dpaa2_sec_dev_private *internals = dev->data->dev_private; 1723 1724 PMD_INIT_FUNC_TRACE(); 1725 if (info != NULL) { 1726 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 1727 info->feature_flags = dev->feature_flags; 1728 info->capabilities = dpaa2_sec_capabilities; 1729 info->sym.max_nb_sessions = internals->max_nb_sessions; 1730 info->driver_id = cryptodev_driver_id; 1731 } 1732 } 1733 1734 static 1735 void dpaa2_sec_stats_get(struct rte_cryptodev *dev, 1736 struct rte_cryptodev_stats *stats) 1737 { 1738 struct dpaa2_sec_dev_private *priv = dev->data->dev_private; 1739 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw; 1740 struct dpseci_sec_counters counters = {0}; 1741 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 1742 dev->data->queue_pairs; 1743 int ret, i; 1744 1745 PMD_INIT_FUNC_TRACE(); 1746 if (stats == NULL) { 1747 PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); 1748 return; 1749 } 1750 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 1751 if (qp[i] == NULL) { 1752 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 1753 continue; 1754 } 1755 1756 stats->enqueued_count += qp[i]->tx_vq.tx_pkts; 1757 stats->dequeued_count += qp[i]->rx_vq.rx_pkts; 1758 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts; 1759 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts; 1760 } 1761 1762 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, 1763 &counters); 1764 if (ret) { 1765 PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n"); 1766 } else { 1767 PMD_DRV_LOG(INFO, "dpseci hw stats:" 1768 "\n\tNumber of Requests Dequeued = %lu" 1769 "\n\tNumber of Outbound Encrypt Requests = %lu" 1770 "\n\tNumber of Inbound Decrypt Requests = %lu" 1771 "\n\tNumber of Outbound Bytes Encrypted = %lu" 1772 "\n\tNumber of Outbound Bytes Protected = %lu" 1773 "\n\tNumber of Inbound Bytes Decrypted = %lu" 1774 "\n\tNumber of Inbound Bytes Validated = %lu", 1775 counters.dequeued_requests, 1776 counters.ob_enc_requests, 1777 counters.ib_dec_requests, 1778 counters.ob_enc_bytes, 1779 counters.ob_prot_bytes, 1780 counters.ib_dec_bytes, 1781 counters.ib_valid_bytes); 1782 } 1783 } 1784 1785 static 1786 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) 1787 { 1788 int i; 1789 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **) 1790 (dev->data->queue_pairs); 1791 1792 PMD_INIT_FUNC_TRACE(); 1793 1794 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 1795 if (qp[i] == NULL) { 1796 PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); 1797 continue; 1798 } 1799 qp[i]->tx_vq.rx_pkts = 0; 1800 qp[i]->tx_vq.tx_pkts = 0; 1801 qp[i]->tx_vq.err_pkts = 0; 1802 qp[i]->rx_vq.rx_pkts = 0; 1803 qp[i]->rx_vq.tx_pkts = 0; 1804 qp[i]->rx_vq.err_pkts = 0; 1805 } 1806 } 1807 1808 static struct rte_cryptodev_ops crypto_ops = { 1809 .dev_configure = dpaa2_sec_dev_configure, 1810 .dev_start = dpaa2_sec_dev_start, 1811 .dev_stop = dpaa2_sec_dev_stop, 1812 .dev_close = dpaa2_sec_dev_close, 1813 .dev_infos_get = dpaa2_sec_dev_infos_get, 1814 .stats_get = dpaa2_sec_stats_get, 1815 .stats_reset = 
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_start = dpaa2_sec_queue_pair_start,
	.queue_pair_stop = dpaa2_sec_queue_pair_stop,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.session_get_size = dpaa2_sec_session_get_size,
	.session_configure = dpaa2_sec_session_configure,
	.session_clear = dpaa2_sec_session_clear,
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
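/*
 * dpaa2_sec_dev_init() below brings a DPSECI object up in four steps:
 * resolve the hardware object id from the fslmc device, publish the ops
 * table and enqueue/dequeue burst entry points, open the object through
 * the MC portal and fetch its attributes (which size the queue pairs), and
 * create the per-device FLE pool that the build_*_fd() helpers draw frame
 * list entries from. dpaa2_sec_uninit() above only releases that FLE pool;
 * the MC handle itself is closed in dpaa2_sec_dev_close().
 */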
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}

static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* Init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
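/*
 * Note that probe/remove are driven by the fslmc bus rather than called by
 * applications. Conceptually (a simplified sketch of the bus side, not
 * code from this file): the bus scans the DPRC container during
 * rte_eal_init(), and for each object whose type matches .drv_type
 * (DPAA2_CRYPTO for this driver) it invokes the registered probe() with
 * the matching rte_dpaa2_device; remove() undoes probe on detach or
 * cleanup.
 */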
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* Free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
			       cryptodev_driver_id);
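/*
 * The registration macros above tie the driver together:
 * RTE_PMD_REGISTER_DPAA2() hooks rte_dpaa2_sec_driver into the fslmc bus,
 * and RTE_PMD_REGISTER_CRYPTO_DRIVER() allocates the value stored in
 * cryptodev_driver_id, which dpaa2_sec_dev_init() stamps into every device
 * instance so the framework can associate sessions with this PMD. An
 * application can later resolve that id by registered driver name via
 * rte_cryptodev_driver_id_get(); the exact name string comes from
 * CRYPTODEV_NAME_DPAA2_SEC_PMD, defined outside this file.
 */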