1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2017-2019 NXP 3 */ 4 5 #include <fcntl.h> 6 #include <unistd.h> 7 #include <sched.h> 8 #include <net/if.h> 9 10 #include <rte_byteorder.h> 11 #include <rte_common.h> 12 #include <rte_cryptodev_pmd.h> 13 #include <rte_crypto.h> 14 #include <rte_cryptodev.h> 15 #include <rte_bus_vdev.h> 16 #include <rte_malloc.h> 17 #include <rte_security_driver.h> 18 #include <rte_hexdump.h> 19 20 #include <caam_jr_capabilities.h> 21 #include <caam_jr_config.h> 22 #include <caam_jr_hw_specific.h> 23 #include <caam_jr_pvt.h> 24 #include <caam_jr_desc.h> 25 #include <caam_jr_log.h> 26 27 /* RTA header files */ 28 #include <hw/desc/common.h> 29 #include <hw/desc/algo.h> 30 #include <of.h> 31 32 #define CAAM_JR_DBG 0 33 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr 34 static uint8_t cryptodev_driver_id; 35 int caam_jr_logtype; 36 37 enum rta_sec_era rta_sec_era; 38 39 /* Lists the states possible for the SEC user space driver. */ 40 enum sec_driver_state_e { 41 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */ 42 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/ 43 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */ 44 }; 45 46 /* Job rings used for communication with SEC HW */ 47 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS]; 48 49 /* The current state of SEC user space driver */ 50 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE; 51 52 /* The number of job rings used by SEC user space driver */ 53 static int g_job_rings_no; 54 static int g_job_rings_max; 55 56 struct sec_outring_entry { 57 phys_addr_t desc; /* Pointer to completed descriptor */ 58 uint32_t status; /* Status for completed descriptor */ 59 } __rte_packed; 60 61 /* virtual address conversin when mempool support is available for ctx */ 62 static inline phys_addr_t 63 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr) 64 { 65 PMD_INIT_FUNC_TRACE(); 66 return (size_t)vaddr - ctx->vtop_offset; 67 } 68 69 static inline void 70 caam_jr_op_ending(struct caam_jr_op_ctx *ctx) 71 { 72 PMD_INIT_FUNC_TRACE(); 73 /* report op status to sym->op and then free the ctx memeory */ 74 rte_mempool_put(ctx->ctx_pool, (void *)ctx); 75 } 76 77 static inline struct caam_jr_op_ctx * 78 caam_jr_alloc_ctx(struct caam_jr_session *ses) 79 { 80 struct caam_jr_op_ctx *ctx; 81 int ret; 82 83 PMD_INIT_FUNC_TRACE(); 84 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx)); 85 if (!ctx || ret) { 86 CAAM_JR_DP_WARN("Alloc sec descriptor failed!"); 87 return NULL; 88 } 89 /* 90 * Clear SG memory. There are 16 SG entries of 16 Bytes each. 91 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times 92 * to clear all the SG entries. caam_jr_alloc_ctx() is called for 93 * each packet, memset is costlier than dcbz_64(). 94 */ 95 dcbz_64(&ctx->sg[SG_CACHELINE_0]); 96 dcbz_64(&ctx->sg[SG_CACHELINE_1]); 97 dcbz_64(&ctx->sg[SG_CACHELINE_2]); 98 dcbz_64(&ctx->sg[SG_CACHELINE_3]); 99 100 ctx->ctx_pool = ses->ctx_pool; 101 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx); 102 103 return ctx; 104 } 105 106 static 107 void caam_jr_stats_get(struct rte_cryptodev *dev, 108 struct rte_cryptodev_stats *stats) 109 { 110 struct caam_jr_qp **qp = (struct caam_jr_qp **) 111 dev->data->queue_pairs; 112 int i; 113 114 PMD_INIT_FUNC_TRACE(); 115 if (stats == NULL) { 116 CAAM_JR_ERR("Invalid stats ptr NULL"); 117 return; 118 } 119 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 120 if (qp[i] == NULL) { 121 CAAM_JR_WARN("Uninitialised queue pair"); 122 continue; 123 } 124 125 stats->enqueued_count += qp[i]->tx_pkts; 126 stats->dequeued_count += qp[i]->rx_pkts; 127 stats->enqueue_err_count += qp[i]->tx_errs; 128 stats->dequeue_err_count += qp[i]->rx_errs; 129 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64 130 "\n\tTX Ring Full = %" PRIu64, 131 qp[i]->rx_poll_err, 132 qp[i]->tx_ring_full); 133 } 134 } 135 136 static 137 void caam_jr_stats_reset(struct rte_cryptodev *dev) 138 { 139 int i; 140 struct caam_jr_qp **qp = (struct caam_jr_qp **) 141 (dev->data->queue_pairs); 142 143 PMD_INIT_FUNC_TRACE(); 144 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 145 if (qp[i] == NULL) { 146 CAAM_JR_WARN("Uninitialised queue pair"); 147 continue; 148 } 149 qp[i]->rx_pkts = 0; 150 qp[i]->rx_errs = 0; 151 qp[i]->rx_poll_err = 0; 152 qp[i]->tx_pkts = 0; 153 qp[i]->tx_errs = 0; 154 qp[i]->tx_ring_full = 0; 155 } 156 } 157 158 static inline int 159 is_cipher_only(struct caam_jr_session *ses) 160 { 161 PMD_INIT_FUNC_TRACE(); 162 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) && 163 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL)); 164 } 165 166 static inline int 167 is_auth_only(struct caam_jr_session *ses) 168 { 169 PMD_INIT_FUNC_TRACE(); 170 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) && 171 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL)); 172 } 173 174 static inline int 175 is_aead(struct caam_jr_session *ses) 176 { 177 PMD_INIT_FUNC_TRACE(); 178 return ((ses->cipher_alg == 0) && 179 (ses->auth_alg == 0) && 180 (ses->aead_alg != 0)); 181 } 182 183 static inline int 184 is_auth_cipher(struct caam_jr_session *ses) 185 { 186 PMD_INIT_FUNC_TRACE(); 187 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) && 188 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) && 189 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC)); 190 } 191 192 static inline int 193 is_proto_ipsec(struct caam_jr_session *ses) 194 { 195 PMD_INIT_FUNC_TRACE(); 196 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC); 197 } 198 199 static inline int 200 is_encode(struct caam_jr_session *ses) 201 { 202 PMD_INIT_FUNC_TRACE(); 203 return ses->dir == DIR_ENC; 204 } 205 206 static inline int 207 is_decode(struct caam_jr_session *ses) 208 { 209 PMD_INIT_FUNC_TRACE(); 210 return ses->dir == DIR_DEC; 211 } 212 213 static inline void 214 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a) 215 { 216 PMD_INIT_FUNC_TRACE(); 217 switch (ses->auth_alg) { 218 case RTE_CRYPTO_AUTH_NULL: 219 ses->digest_length = 0; 220 break; 221 case RTE_CRYPTO_AUTH_MD5_HMAC: 222 alginfo_a->algtype = 223 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 224 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5; 225 alginfo_a->algmode = OP_ALG_AAI_HMAC; 226 break; 227 case RTE_CRYPTO_AUTH_SHA1_HMAC: 228 alginfo_a->algtype = 229 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 230 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1; 231 alginfo_a->algmode = OP_ALG_AAI_HMAC; 232 break; 233 case RTE_CRYPTO_AUTH_SHA224_HMAC: 234 alginfo_a->algtype = 235 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 236 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224; 237 alginfo_a->algmode = OP_ALG_AAI_HMAC; 238 break; 239 case RTE_CRYPTO_AUTH_SHA256_HMAC: 240 alginfo_a->algtype = 241 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 242 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256; 243 alginfo_a->algmode = OP_ALG_AAI_HMAC; 244 break; 245 case RTE_CRYPTO_AUTH_SHA384_HMAC: 246 alginfo_a->algtype = 247 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 248 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384; 249 alginfo_a->algmode = OP_ALG_AAI_HMAC; 250 break; 251 case RTE_CRYPTO_AUTH_SHA512_HMAC: 252 alginfo_a->algtype = 253 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 254 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512; 255 alginfo_a->algmode = OP_ALG_AAI_HMAC; 256 break; 257 default: 258 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg); 259 } 260 } 261 262 static inline void 263 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c) 264 { 265 PMD_INIT_FUNC_TRACE(); 266 switch (ses->cipher_alg) { 267 case RTE_CRYPTO_CIPHER_NULL: 268 break; 269 case RTE_CRYPTO_CIPHER_AES_CBC: 270 alginfo_c->algtype = 271 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 272 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES; 273 alginfo_c->algmode = OP_ALG_AAI_CBC; 274 break; 275 case RTE_CRYPTO_CIPHER_3DES_CBC: 276 alginfo_c->algtype = 277 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 278 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES; 279 alginfo_c->algmode = OP_ALG_AAI_CBC; 280 break; 281 case RTE_CRYPTO_CIPHER_AES_CTR: 282 alginfo_c->algtype = 283 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ? 284 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES; 285 alginfo_c->algmode = OP_ALG_AAI_CTR; 286 break; 287 default: 288 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg); 289 } 290 } 291 292 static inline void 293 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo) 294 { 295 PMD_INIT_FUNC_TRACE(); 296 switch (ses->aead_alg) { 297 case RTE_CRYPTO_AEAD_AES_GCM: 298 alginfo->algtype = OP_ALG_ALGSEL_AES; 299 alginfo->algmode = OP_ALG_AAI_GCM; 300 break; 301 default: 302 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg); 303 } 304 } 305 306 /* prepare command block of the session */ 307 static int 308 caam_jr_prep_cdb(struct caam_jr_session *ses) 309 { 310 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; 311 int32_t shared_desc_len = 0; 312 struct sec_cdb *cdb; 313 int err; 314 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER 315 int swap = false; 316 #else 317 int swap = true; 318 #endif 319 320 PMD_INIT_FUNC_TRACE(); 321 if (ses->cdb) 322 caam_jr_dma_free(ses->cdb); 323 324 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb)); 325 if (!cdb) { 326 CAAM_JR_ERR("failed to allocate memory for cdb\n"); 327 return -1; 328 } 329 330 ses->cdb = cdb; 331 332 memset(cdb, 0, sizeof(struct sec_cdb)); 333 334 if (is_cipher_only(ses)) { 335 caam_cipher_alg(ses, &alginfo_c); 336 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) { 337 CAAM_JR_ERR("not supported cipher alg"); 338 rte_free(cdb); 339 return -ENOTSUP; 340 } 341 342 alginfo_c.key = (size_t)ses->cipher_key.data; 343 alginfo_c.keylen = ses->cipher_key.length; 344 alginfo_c.key_enc_flags = 0; 345 alginfo_c.key_type = RTA_DATA_IMM; 346 347 shared_desc_len = cnstr_shdsc_blkcipher( 348 cdb->sh_desc, true, 349 swap, SHR_NEVER, &alginfo_c, 350 NULL, 351 ses->iv.length, 352 ses->dir); 353 } else if (is_auth_only(ses)) { 354 caam_auth_alg(ses, &alginfo_a); 355 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) { 356 CAAM_JR_ERR("not supported auth alg"); 357 rte_free(cdb); 358 return -ENOTSUP; 359 } 360 361 alginfo_a.key = (size_t)ses->auth_key.data; 362 alginfo_a.keylen = ses->auth_key.length; 363 alginfo_a.key_enc_flags = 0; 364 alginfo_a.key_type = RTA_DATA_IMM; 365 366 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true, 367 swap, SHR_NEVER, &alginfo_a, 368 !ses->dir, 369 ses->digest_length); 370 } else if (is_aead(ses)) { 371 caam_aead_alg(ses, &alginfo); 372 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) { 373 CAAM_JR_ERR("not supported aead alg"); 374 rte_free(cdb); 375 return -ENOTSUP; 376 } 377 alginfo.key = (size_t)ses->aead_key.data; 378 alginfo.keylen = ses->aead_key.length; 379 alginfo.key_enc_flags = 0; 380 alginfo.key_type = RTA_DATA_IMM; 381 382 if (ses->dir == DIR_ENC) 383 shared_desc_len = cnstr_shdsc_gcm_encap( 384 cdb->sh_desc, true, swap, 385 SHR_NEVER, &alginfo, 386 ses->iv.length, 387 ses->digest_length); 388 else 389 shared_desc_len = cnstr_shdsc_gcm_decap( 390 cdb->sh_desc, true, swap, 391 SHR_NEVER, &alginfo, 392 ses->iv.length, 393 ses->digest_length); 394 } else { 395 caam_cipher_alg(ses, &alginfo_c); 396 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) { 397 CAAM_JR_ERR("not supported cipher alg"); 398 rte_free(cdb); 399 return -ENOTSUP; 400 } 401 402 alginfo_c.key = (size_t)ses->cipher_key.data; 403 alginfo_c.keylen = ses->cipher_key.length; 404 alginfo_c.key_enc_flags = 0; 405 alginfo_c.key_type = RTA_DATA_IMM; 406 407 caam_auth_alg(ses, &alginfo_a); 408 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) { 409 CAAM_JR_ERR("not supported auth alg"); 410 rte_free(cdb); 411 return -ENOTSUP; 412 } 413 414 alginfo_a.key = (size_t)ses->auth_key.data; 415 alginfo_a.keylen = ses->auth_key.length; 416 alginfo_a.key_enc_flags = 0; 417 alginfo_a.key_type = RTA_DATA_IMM; 418 419 cdb->sh_desc[0] = alginfo_c.keylen; 420 cdb->sh_desc[1] = alginfo_a.keylen; 421 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, 422 MIN_JOB_DESC_SIZE, 423 (unsigned int *)cdb->sh_desc, 424 &cdb->sh_desc[2], 2); 425 426 if (err < 0) { 427 CAAM_JR_ERR("Crypto: Incorrect key lengths"); 428 rte_free(cdb); 429 return err; 430 } 431 if (cdb->sh_desc[2] & 1) 432 alginfo_c.key_type = RTA_DATA_IMM; 433 else { 434 alginfo_c.key = (size_t)caam_jr_mem_vtop( 435 (void *)(size_t)alginfo_c.key); 436 alginfo_c.key_type = RTA_DATA_PTR; 437 } 438 if (cdb->sh_desc[2] & (1<<1)) 439 alginfo_a.key_type = RTA_DATA_IMM; 440 else { 441 alginfo_a.key = (size_t)caam_jr_mem_vtop( 442 (void *)(size_t)alginfo_a.key); 443 alginfo_a.key_type = RTA_DATA_PTR; 444 } 445 cdb->sh_desc[0] = 0; 446 cdb->sh_desc[1] = 0; 447 cdb->sh_desc[2] = 0; 448 if (is_proto_ipsec(ses)) { 449 if (ses->dir == DIR_ENC) { 450 shared_desc_len = cnstr_shdsc_ipsec_new_encap( 451 cdb->sh_desc, 452 true, swap, SHR_SERIAL, 453 &ses->encap_pdb, 454 (uint8_t *)&ses->ip4_hdr, 455 &alginfo_c, &alginfo_a); 456 } else if (ses->dir == DIR_DEC) { 457 shared_desc_len = cnstr_shdsc_ipsec_new_decap( 458 cdb->sh_desc, 459 true, swap, SHR_SERIAL, 460 &ses->decap_pdb, 461 &alginfo_c, &alginfo_a); 462 } 463 } else { 464 /* Auth_only_len is set as 0 here and it will be 465 * overwritten in fd for each packet. 466 */ 467 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc, 468 true, swap, SHR_SERIAL, 469 &alginfo_c, &alginfo_a, 470 ses->iv.length, 0, 471 ses->digest_length, ses->dir); 472 } 473 } 474 475 if (shared_desc_len < 0) { 476 CAAM_JR_ERR("error in preparing command block"); 477 return shared_desc_len; 478 } 479 480 #if CAAM_JR_DBG 481 SEC_DUMP_DESC(cdb->sh_desc); 482 #endif 483 484 cdb->sh_hdr.hi.field.idlen = shared_desc_len; 485 486 return 0; 487 } 488 489 /* @brief Poll the HW for already processed jobs in the JR 490 * and silently discard the available jobs or notify them to UA 491 * with indicated error code. 492 * 493 * @param [in,out] job_ring The job ring to poll. 494 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if 495 * descriptors are to be discarded 496 * or notified to UA with given error_code. 497 * @param [out] notified_descs Number of notified descriptors. Can be NULL 498 * if do_notify is #FALSE 499 */ 500 static void 501 hw_flush_job_ring(struct sec_job_ring_t *job_ring, 502 uint32_t do_notify, 503 uint32_t *notified_descs) 504 { 505 int32_t jobs_no_to_discard = 0; 506 int32_t discarded_descs_no = 0; 507 508 PMD_INIT_FUNC_TRACE(); 509 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]", 510 job_ring, job_ring->pidx, job_ring->cidx, do_notify); 511 512 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring); 513 514 /* Discard all jobs */ 515 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs", 516 job_ring, job_ring->pidx, job_ring->cidx, 517 jobs_no_to_discard); 518 519 while (jobs_no_to_discard > discarded_descs_no) { 520 discarded_descs_no++; 521 /* Now increment the consumer index for the current job ring, 522 * AFTER saving job in temporary location! 523 * Increment the consumer index for the current job ring 524 */ 525 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx, 526 SEC_JOB_RING_SIZE); 527 528 hw_remove_entries(job_ring, 1); 529 } 530 531 if (do_notify == true) { 532 ASSERT(notified_descs != NULL); 533 *notified_descs = discarded_descs_no; 534 } 535 } 536 537 /* @brief Poll the HW for already processed jobs in the JR 538 * and notify the available jobs to UA. 539 * 540 * @param [in] job_ring The job ring to poll. 541 * @param [in] limit The maximum number of jobs to notify. 542 * If set to negative value, all available jobs are 543 * notified. 544 * 545 * @retval >=0 for No of jobs notified to UA. 546 * @retval -1 for error 547 */ 548 static int 549 hw_poll_job_ring(struct sec_job_ring_t *job_ring, 550 struct rte_crypto_op **ops, int32_t limit, 551 struct caam_jr_qp *jr_qp) 552 { 553 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/ 554 int32_t number_of_jobs_available = 0; 555 int32_t notified_descs_no = 0; 556 uint32_t sec_error_code = 0; 557 struct job_descriptor *current_desc; 558 phys_addr_t current_desc_addr; 559 phys_addr_t *temp_addr; 560 struct caam_jr_op_ctx *ctx; 561 562 PMD_INIT_FUNC_TRACE(); 563 /* TODO check for ops have memory*/ 564 /* check here if any JR error that cannot be written 565 * in the output status word has occurred 566 */ 567 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) { 568 CAAM_JR_INFO("err received"); 569 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT( 570 GET_JR_REG(JRINT, job_ring)); 571 if (unlikely(sec_error_code)) { 572 hw_job_ring_error_print(job_ring, sec_error_code); 573 return -1; 574 } 575 } 576 /* compute the number of jobs available in the job ring based on the 577 * producer and consumer index values. 578 */ 579 number_of_jobs_available = hw_get_no_finished_jobs(job_ring); 580 /* Compute the number of notifications that need to be raised to UA 581 * If limit > total number of done jobs -> notify all done jobs 582 * If limit = 0 -> error 583 * If limit < total number of done jobs -> notify a number 584 * of done jobs equal with limit 585 */ 586 jobs_no_to_notify = (limit > number_of_jobs_available) ? 587 number_of_jobs_available : limit; 588 CAAM_JR_DP_DEBUG( 589 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d", 590 job_ring, job_ring->pidx, job_ring->cidx, 591 limit, number_of_jobs_available, jobs_no_to_notify); 592 593 rte_smp_rmb(); 594 595 while (jobs_no_to_notify > notified_descs_no) { 596 static uint64_t false_alarm; 597 static uint64_t real_poll; 598 599 /* Get job status here */ 600 sec_error_code = job_ring->output_ring[job_ring->cidx].status; 601 /* Get completed descriptor */ 602 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc); 603 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr); 604 605 real_poll++; 606 /* todo check if it is false alarm no desc present */ 607 if (!current_desc_addr) { 608 false_alarm++; 609 printf("false alarm %" PRIu64 "real %" PRIu64 610 " sec_err =0x%x cidx Index =0%d\n", 611 false_alarm, real_poll, 612 sec_error_code, job_ring->cidx); 613 rte_panic("CAAM JR descriptor NULL"); 614 return notified_descs_no; 615 } 616 current_desc = (struct job_descriptor *) 617 caam_jr_dma_ptov(current_desc_addr); 618 /* now increment the consumer index for the current job ring, 619 * AFTER saving job in temporary location! 620 */ 621 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx, 622 SEC_JOB_RING_SIZE); 623 /* Signal that the job has been processed and the slot is free*/ 624 hw_remove_entries(job_ring, 1); 625 /*TODO for multiple ops, packets*/ 626 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes); 627 if (unlikely(sec_error_code)) { 628 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n", 629 job_ring->cidx, sec_error_code); 630 hw_handle_job_ring_error(job_ring, sec_error_code); 631 //todo improve with exact errors 632 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; 633 jr_qp->rx_errs++; 634 } else { 635 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; 636 #if CAAM_JR_DBG 637 if (ctx->op->sym->m_dst) { 638 rte_hexdump(stdout, "PROCESSED", 639 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *), 640 rte_pktmbuf_data_len(ctx->op->sym->m_dst)); 641 } else { 642 rte_hexdump(stdout, "PROCESSED", 643 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *), 644 rte_pktmbuf_data_len(ctx->op->sym->m_src)); 645 } 646 #endif 647 } 648 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { 649 struct ip *ip4_hdr; 650 651 if (ctx->op->sym->m_dst) { 652 /*TODO check for ip header or other*/ 653 ip4_hdr = (struct ip *) 654 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*); 655 ctx->op->sym->m_dst->pkt_len = 656 rte_be_to_cpu_16(ip4_hdr->ip_len); 657 ctx->op->sym->m_dst->data_len = 658 rte_be_to_cpu_16(ip4_hdr->ip_len); 659 } else { 660 ip4_hdr = (struct ip *) 661 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*); 662 ctx->op->sym->m_src->pkt_len = 663 rte_be_to_cpu_16(ip4_hdr->ip_len); 664 ctx->op->sym->m_src->data_len = 665 rte_be_to_cpu_16(ip4_hdr->ip_len); 666 } 667 } 668 *ops = ctx->op; 669 caam_jr_op_ending(ctx); 670 ops++; 671 notified_descs_no++; 672 } 673 return notified_descs_no; 674 } 675 676 static uint16_t 677 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops, 678 uint16_t nb_ops) 679 { 680 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp; 681 struct sec_job_ring_t *ring = jr_qp->ring; 682 int num_rx; 683 int ret; 684 685 PMD_INIT_FUNC_TRACE(); 686 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops); 687 688 /* Poll job ring 689 * If nb_ops < 0 -> poll JR until no more notifications are available. 690 * If nb_ops > 0 -> poll JR until limit is reached. 691 */ 692 693 /* Run hw poll job ring */ 694 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp); 695 if (num_rx < 0) { 696 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx); 697 return 0; 698 } 699 700 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx); 701 702 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) { 703 if (num_rx < nb_ops) { 704 ret = caam_jr_enable_irqs(ring->irq_fd); 705 SEC_ASSERT(ret == 0, ret, 706 "Failed to enable irqs for job ring %p", ring); 707 } 708 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) { 709 710 /* Always enable IRQ generation when in pure IRQ mode */ 711 ret = caam_jr_enable_irqs(ring->irq_fd); 712 SEC_ASSERT(ret == 0, ret, 713 "Failed to enable irqs for job ring %p", ring); 714 } 715 716 jr_qp->rx_pkts += num_rx; 717 718 return num_rx; 719 } 720 721 /** 722 * packet looks like: 723 * |<----data_len------->| 724 * |ip_header|ah_header|icv|payload| 725 * ^ 726 * | 727 * mbuf->pkt.data 728 */ 729 static inline struct caam_jr_op_ctx * 730 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses) 731 { 732 struct rte_crypto_sym_op *sym = op->sym; 733 struct rte_mbuf *mbuf = sym->m_src; 734 struct caam_jr_op_ctx *ctx; 735 struct sec4_sg_entry *sg; 736 int length; 737 struct sec_cdb *cdb; 738 uint64_t sdesc_offset; 739 struct sec_job_descriptor_t *jobdescr; 740 uint8_t extra_segs; 741 742 PMD_INIT_FUNC_TRACE(); 743 if (is_decode(ses)) 744 extra_segs = 2; 745 else 746 extra_segs = 1; 747 748 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) { 749 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d", 750 MAX_SG_ENTRIES); 751 return NULL; 752 } 753 754 ctx = caam_jr_alloc_ctx(ses); 755 if (!ctx) 756 return NULL; 757 758 ctx->op = op; 759 760 cdb = ses->cdb; 761 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 762 763 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 764 765 SEC_JD_INIT(jobdescr); 766 SEC_JD_SET_SD(jobdescr, 767 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 768 cdb->sh_hdr.hi.field.idlen); 769 770 /* output */ 771 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr, 772 0, ses->digest_length); 773 774 /*input */ 775 sg = &ctx->sg[0]; 776 length = sym->auth.data.length; 777 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset); 778 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset); 779 780 /* Successive segs */ 781 mbuf = mbuf->next; 782 while (mbuf) { 783 sg++; 784 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)); 785 sg->len = cpu_to_caam32(mbuf->data_len); 786 mbuf = mbuf->next; 787 } 788 789 if (is_decode(ses)) { 790 /* digest verification case */ 791 sg++; 792 /* hash result or digest, save digest first */ 793 rte_memcpy(ctx->digest, sym->auth.digest.data, 794 ses->digest_length); 795 #if CAAM_JR_DBG 796 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length); 797 #endif 798 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest)); 799 sg->len = cpu_to_caam32(ses->digest_length); 800 length += ses->digest_length; 801 } else { 802 sg->len -= ses->digest_length; 803 } 804 805 /* last element*/ 806 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 807 808 SEC_JD_SET_IN_PTR(jobdescr, 809 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length); 810 /* enabling sg list */ 811 (jobdescr)->seq_in.command.word |= 0x01000000; 812 813 return ctx; 814 } 815 816 static inline struct caam_jr_op_ctx * 817 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses) 818 { 819 struct rte_crypto_sym_op *sym = op->sym; 820 struct caam_jr_op_ctx *ctx; 821 struct sec4_sg_entry *sg; 822 rte_iova_t start_addr; 823 struct sec_cdb *cdb; 824 uint64_t sdesc_offset; 825 struct sec_job_descriptor_t *jobdescr; 826 827 PMD_INIT_FUNC_TRACE(); 828 ctx = caam_jr_alloc_ctx(ses); 829 if (!ctx) 830 return NULL; 831 832 ctx->op = op; 833 834 cdb = ses->cdb; 835 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 836 837 start_addr = rte_pktmbuf_iova(sym->m_src); 838 839 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 840 841 SEC_JD_INIT(jobdescr); 842 SEC_JD_SET_SD(jobdescr, 843 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 844 cdb->sh_hdr.hi.field.idlen); 845 846 /* output */ 847 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr, 848 0, ses->digest_length); 849 850 /*input */ 851 if (is_decode(ses)) { 852 sg = &ctx->sg[0]; 853 SEC_JD_SET_IN_PTR(jobdescr, 854 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0, 855 (sym->auth.data.length + ses->digest_length)); 856 /* enabling sg list */ 857 (jobdescr)->seq_in.command.word |= 0x01000000; 858 859 /* hash result or digest, save digest first */ 860 rte_memcpy(ctx->digest, sym->auth.digest.data, 861 ses->digest_length); 862 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset); 863 sg->len = cpu_to_caam32(sym->auth.data.length); 864 865 #if CAAM_JR_DBG 866 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length); 867 #endif 868 /* let's check digest by hw */ 869 sg++; 870 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest)); 871 sg->len = cpu_to_caam32(ses->digest_length); 872 /* last element*/ 873 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 874 } else { 875 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr, 876 sym->auth.data.offset, sym->auth.data.length); 877 } 878 return ctx; 879 } 880 881 static inline struct caam_jr_op_ctx * 882 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses) 883 { 884 struct rte_crypto_sym_op *sym = op->sym; 885 struct rte_mbuf *mbuf = sym->m_src; 886 struct caam_jr_op_ctx *ctx; 887 struct sec4_sg_entry *sg, *in_sg; 888 int length; 889 struct sec_cdb *cdb; 890 uint64_t sdesc_offset; 891 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 892 ses->iv.offset); 893 struct sec_job_descriptor_t *jobdescr; 894 uint8_t reg_segs; 895 896 PMD_INIT_FUNC_TRACE(); 897 if (sym->m_dst) { 898 mbuf = sym->m_dst; 899 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2; 900 } else { 901 mbuf = sym->m_src; 902 reg_segs = mbuf->nb_segs * 2 + 2; 903 } 904 905 if (reg_segs > MAX_SG_ENTRIES) { 906 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d", 907 MAX_SG_ENTRIES); 908 return NULL; 909 } 910 911 ctx = caam_jr_alloc_ctx(ses); 912 if (!ctx) 913 return NULL; 914 915 ctx->op = op; 916 cdb = ses->cdb; 917 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 918 919 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 920 921 SEC_JD_INIT(jobdescr); 922 SEC_JD_SET_SD(jobdescr, 923 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 924 cdb->sh_hdr.hi.field.idlen); 925 926 #if CAAM_JR_DBG 927 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d", 928 sym->m_src->data_off, sym->cipher.data.offset, 929 sym->cipher.data.length, ses->iv.length); 930 #endif 931 /* output */ 932 if (sym->m_dst) 933 mbuf = sym->m_dst; 934 else 935 mbuf = sym->m_src; 936 937 sg = &ctx->sg[0]; 938 length = sym->cipher.data.length; 939 940 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) 941 + sym->cipher.data.offset); 942 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset); 943 944 /* Successive segs */ 945 mbuf = mbuf->next; 946 while (mbuf) { 947 sg++; 948 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)); 949 sg->len = cpu_to_caam32(mbuf->data_len); 950 mbuf = mbuf->next; 951 } 952 /* last element*/ 953 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 954 955 SEC_JD_SET_OUT_PTR(jobdescr, 956 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, 957 length); 958 /*enabling sg bit */ 959 (jobdescr)->seq_out.command.word |= 0x01000000; 960 961 /*input */ 962 sg++; 963 mbuf = sym->m_src; 964 in_sg = sg; 965 966 length = sym->cipher.data.length + ses->iv.length; 967 968 /* IV */ 969 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr)); 970 sg->len = cpu_to_caam32(ses->iv.length); 971 972 /* 1st seg */ 973 sg++; 974 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) 975 + sym->cipher.data.offset); 976 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset); 977 978 /* Successive segs */ 979 mbuf = mbuf->next; 980 while (mbuf) { 981 sg++; 982 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)); 983 sg->len = cpu_to_caam32(mbuf->data_len); 984 mbuf = mbuf->next; 985 } 986 /* last element*/ 987 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 988 989 990 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0, 991 length); 992 /*enabling sg bit */ 993 (jobdescr)->seq_in.command.word |= 0x01000000; 994 995 return ctx; 996 } 997 998 static inline struct caam_jr_op_ctx * 999 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses) 1000 { 1001 struct rte_crypto_sym_op *sym = op->sym; 1002 struct caam_jr_op_ctx *ctx; 1003 struct sec4_sg_entry *sg; 1004 rte_iova_t src_start_addr, dst_start_addr; 1005 struct sec_cdb *cdb; 1006 uint64_t sdesc_offset; 1007 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1008 ses->iv.offset); 1009 struct sec_job_descriptor_t *jobdescr; 1010 1011 PMD_INIT_FUNC_TRACE(); 1012 ctx = caam_jr_alloc_ctx(ses); 1013 if (!ctx) 1014 return NULL; 1015 1016 ctx->op = op; 1017 cdb = ses->cdb; 1018 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 1019 1020 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1021 if (sym->m_dst) 1022 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1023 else 1024 dst_start_addr = src_start_addr; 1025 1026 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 1027 1028 SEC_JD_INIT(jobdescr); 1029 SEC_JD_SET_SD(jobdescr, 1030 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 1031 cdb->sh_hdr.hi.field.idlen); 1032 1033 #if CAAM_JR_DBG 1034 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d", 1035 sym->m_src->data_off, sym->cipher.data.offset, 1036 sym->cipher.data.length, ses->iv.length); 1037 #endif 1038 /* output */ 1039 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 1040 sym->cipher.data.offset, 1041 sym->cipher.data.length + ses->iv.length); 1042 1043 /*input */ 1044 sg = &ctx->sg[0]; 1045 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0, 1046 sym->cipher.data.length + ses->iv.length); 1047 /*enabling sg bit */ 1048 (jobdescr)->seq_in.command.word |= 0x01000000; 1049 1050 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr)); 1051 sg->len = cpu_to_caam32(ses->iv.length); 1052 1053 sg = &ctx->sg[1]; 1054 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset); 1055 sg->len = cpu_to_caam32(sym->cipher.data.length); 1056 /* last element*/ 1057 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1058 1059 return ctx; 1060 } 1061 1062 /* For decapsulation: 1063 * Input: 1064 * +----+----------------+--------------------------------+-----+ 1065 * | IV | Auth-only data | Authenticated & Encrypted data | ICV | 1066 * +----+----------------+--------------------------------+-----+ 1067 * Output: 1068 * +----+--------------------------+ 1069 * | Decrypted & authenticated data | 1070 * +----+--------------------------+ 1071 */ 1072 1073 static inline struct caam_jr_op_ctx * 1074 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses) 1075 { 1076 struct rte_crypto_sym_op *sym = op->sym; 1077 struct caam_jr_op_ctx *ctx; 1078 struct sec4_sg_entry *sg, *out_sg, *in_sg; 1079 struct rte_mbuf *mbuf; 1080 uint32_t length = 0; 1081 struct sec_cdb *cdb; 1082 uint64_t sdesc_offset; 1083 uint8_t req_segs; 1084 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1085 ses->iv.offset); 1086 struct sec_job_descriptor_t *jobdescr; 1087 uint32_t auth_only_len; 1088 1089 PMD_INIT_FUNC_TRACE(); 1090 auth_only_len = op->sym->auth.data.length - 1091 op->sym->cipher.data.length; 1092 1093 if (sym->m_dst) { 1094 mbuf = sym->m_dst; 1095 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3; 1096 } else { 1097 mbuf = sym->m_src; 1098 req_segs = mbuf->nb_segs * 2 + 3; 1099 } 1100 1101 if (req_segs > MAX_SG_ENTRIES) { 1102 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d", 1103 MAX_SG_ENTRIES); 1104 return NULL; 1105 } 1106 1107 ctx = caam_jr_alloc_ctx(ses); 1108 if (!ctx) 1109 return NULL; 1110 1111 ctx->op = op; 1112 cdb = ses->cdb; 1113 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 1114 1115 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 1116 1117 SEC_JD_INIT(jobdescr); 1118 SEC_JD_SET_SD(jobdescr, 1119 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 1120 cdb->sh_hdr.hi.field.idlen); 1121 1122 /* output */ 1123 if (sym->m_dst) 1124 mbuf = sym->m_dst; 1125 else 1126 mbuf = sym->m_src; 1127 1128 out_sg = &ctx->sg[0]; 1129 if (is_encode(ses)) 1130 length = sym->auth.data.length + ses->digest_length; 1131 else 1132 length = sym->auth.data.length; 1133 1134 sg = &ctx->sg[0]; 1135 1136 /* 1st seg */ 1137 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) 1138 + sym->auth.data.offset); 1139 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset); 1140 1141 /* Successive segs */ 1142 mbuf = mbuf->next; 1143 while (mbuf) { 1144 sg++; 1145 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)); 1146 sg->len = cpu_to_caam32(mbuf->data_len); 1147 mbuf = mbuf->next; 1148 } 1149 1150 if (is_encode(ses)) { 1151 /* set auth output */ 1152 sg++; 1153 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr); 1154 sg->len = cpu_to_caam32(ses->digest_length); 1155 } 1156 /* last element*/ 1157 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1158 1159 SEC_JD_SET_OUT_PTR(jobdescr, 1160 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length); 1161 /* set sg bit */ 1162 (jobdescr)->seq_out.command.word |= 0x01000000; 1163 1164 /* input */ 1165 sg++; 1166 mbuf = sym->m_src; 1167 in_sg = sg; 1168 if (is_encode(ses)) 1169 length = ses->iv.length + sym->auth.data.length; 1170 else 1171 length = ses->iv.length + sym->auth.data.length 1172 + ses->digest_length; 1173 1174 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr)); 1175 sg->len = cpu_to_caam32(ses->iv.length); 1176 1177 sg++; 1178 /* 1st seg */ 1179 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) 1180 + sym->auth.data.offset); 1181 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset); 1182 1183 /* Successive segs */ 1184 mbuf = mbuf->next; 1185 while (mbuf) { 1186 sg++; 1187 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)); 1188 sg->len = cpu_to_caam32(mbuf->data_len); 1189 mbuf = mbuf->next; 1190 } 1191 1192 if (is_decode(ses)) { 1193 sg++; 1194 rte_memcpy(ctx->digest, sym->auth.digest.data, 1195 ses->digest_length); 1196 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest)); 1197 sg->len = cpu_to_caam32(ses->digest_length); 1198 } 1199 /* last element*/ 1200 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1201 1202 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0, 1203 length); 1204 /* set sg bit */ 1205 (jobdescr)->seq_in.command.word |= 0x01000000; 1206 /* Auth_only_len is set as 0 in descriptor and it is 1207 * overwritten here in the jd which will update 1208 * the DPOVRD reg. 1209 */ 1210 if (auth_only_len) 1211 /* set sg bit */ 1212 (jobdescr)->dpovrd = 0x80000000 | auth_only_len; 1213 1214 return ctx; 1215 } 1216 1217 static inline struct caam_jr_op_ctx * 1218 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses) 1219 { 1220 struct rte_crypto_sym_op *sym = op->sym; 1221 struct caam_jr_op_ctx *ctx; 1222 struct sec4_sg_entry *sg; 1223 rte_iova_t src_start_addr, dst_start_addr; 1224 uint32_t length = 0; 1225 struct sec_cdb *cdb; 1226 uint64_t sdesc_offset; 1227 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, 1228 ses->iv.offset); 1229 struct sec_job_descriptor_t *jobdescr; 1230 uint32_t auth_only_len; 1231 1232 PMD_INIT_FUNC_TRACE(); 1233 auth_only_len = op->sym->auth.data.length - 1234 op->sym->cipher.data.length; 1235 1236 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1237 if (sym->m_dst) 1238 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1239 else 1240 dst_start_addr = src_start_addr; 1241 1242 ctx = caam_jr_alloc_ctx(ses); 1243 if (!ctx) 1244 return NULL; 1245 1246 ctx->op = op; 1247 cdb = ses->cdb; 1248 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 1249 1250 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 1251 1252 SEC_JD_INIT(jobdescr); 1253 SEC_JD_SET_SD(jobdescr, 1254 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 1255 cdb->sh_hdr.hi.field.idlen); 1256 1257 /* input */ 1258 sg = &ctx->sg[0]; 1259 if (is_encode(ses)) { 1260 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr)); 1261 sg->len = cpu_to_caam32(ses->iv.length); 1262 length += ses->iv.length; 1263 1264 sg++; 1265 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset); 1266 sg->len = cpu_to_caam32(sym->auth.data.length); 1267 length += sym->auth.data.length; 1268 /* last element*/ 1269 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1270 } else { 1271 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr)); 1272 sg->len = cpu_to_caam32(ses->iv.length); 1273 length += ses->iv.length; 1274 1275 sg++; 1276 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset); 1277 sg->len = cpu_to_caam32(sym->auth.data.length); 1278 length += sym->auth.data.length; 1279 1280 rte_memcpy(ctx->digest, sym->auth.digest.data, 1281 ses->digest_length); 1282 sg++; 1283 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest)); 1284 sg->len = cpu_to_caam32(ses->digest_length); 1285 length += ses->digest_length; 1286 /* last element*/ 1287 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1288 } 1289 1290 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0, 1291 length); 1292 /* set sg bit */ 1293 (jobdescr)->seq_in.command.word |= 0x01000000; 1294 1295 /* output */ 1296 sg = &ctx->sg[6]; 1297 1298 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset); 1299 sg->len = cpu_to_caam32(sym->cipher.data.length); 1300 length = sym->cipher.data.length; 1301 1302 if (is_encode(ses)) { 1303 /* set auth output */ 1304 sg++; 1305 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr); 1306 sg->len = cpu_to_caam32(ses->digest_length); 1307 length += ses->digest_length; 1308 } 1309 /* last element*/ 1310 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); 1311 1312 SEC_JD_SET_OUT_PTR(jobdescr, 1313 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length); 1314 /* set sg bit */ 1315 (jobdescr)->seq_out.command.word |= 0x01000000; 1316 1317 /* Auth_only_len is set as 0 in descriptor and it is 1318 * overwritten here in the jd which will update 1319 * the DPOVRD reg. 1320 */ 1321 if (auth_only_len) 1322 /* set sg bit */ 1323 (jobdescr)->dpovrd = 0x80000000 | auth_only_len; 1324 1325 return ctx; 1326 } 1327 1328 static inline struct caam_jr_op_ctx * 1329 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses) 1330 { 1331 struct rte_crypto_sym_op *sym = op->sym; 1332 struct caam_jr_op_ctx *ctx = NULL; 1333 phys_addr_t src_start_addr, dst_start_addr; 1334 struct sec_cdb *cdb; 1335 uint64_t sdesc_offset; 1336 struct sec_job_descriptor_t *jobdescr; 1337 1338 PMD_INIT_FUNC_TRACE(); 1339 ctx = caam_jr_alloc_ctx(ses); 1340 if (!ctx) 1341 return NULL; 1342 ctx->op = op; 1343 1344 src_start_addr = rte_pktmbuf_iova(sym->m_src); 1345 if (sym->m_dst) 1346 dst_start_addr = rte_pktmbuf_iova(sym->m_dst); 1347 else 1348 dst_start_addr = src_start_addr; 1349 1350 cdb = ses->cdb; 1351 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb); 1352 1353 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 1354 1355 SEC_JD_INIT(jobdescr); 1356 SEC_JD_SET_SD(jobdescr, 1357 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset, 1358 cdb->sh_hdr.hi.field.idlen); 1359 1360 /* output */ 1361 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0, 1362 sym->m_src->buf_len - sym->m_src->data_off); 1363 /* input */ 1364 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0, 1365 sym->m_src->pkt_len); 1366 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK; 1367 1368 return ctx; 1369 } 1370 1371 static int 1372 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp) 1373 { 1374 struct sec_job_ring_t *ring = qp->ring; 1375 struct caam_jr_session *ses; 1376 struct caam_jr_op_ctx *ctx = NULL; 1377 struct sec_job_descriptor_t *jobdescr __rte_unused; 1378 1379 PMD_INIT_FUNC_TRACE(); 1380 switch (op->sess_type) { 1381 case RTE_CRYPTO_OP_WITH_SESSION: 1382 ses = (struct caam_jr_session *) 1383 get_sym_session_private_data(op->sym->session, 1384 cryptodev_driver_id); 1385 break; 1386 case RTE_CRYPTO_OP_SECURITY_SESSION: 1387 ses = (struct caam_jr_session *) 1388 get_sec_session_private_data( 1389 op->sym->sec_session); 1390 break; 1391 default: 1392 CAAM_JR_DP_ERR("sessionless crypto op not supported"); 1393 qp->tx_errs++; 1394 return -1; 1395 } 1396 1397 if (unlikely(!ses->qp || ses->qp != qp)) { 1398 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp); 1399 ses->qp = qp; 1400 caam_jr_prep_cdb(ses); 1401 } 1402 1403 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) { 1404 if (is_auth_cipher(ses)) 1405 ctx = build_cipher_auth(op, ses); 1406 else if (is_aead(ses)) 1407 goto err1; 1408 else if (is_auth_only(ses)) 1409 ctx = build_auth_only(op, ses); 1410 else if (is_cipher_only(ses)) 1411 ctx = build_cipher_only(op, ses); 1412 else if (is_proto_ipsec(ses)) 1413 ctx = build_proto(op, ses); 1414 } else { 1415 if (is_auth_cipher(ses)) 1416 ctx = build_cipher_auth_sg(op, ses); 1417 else if (is_aead(ses)) 1418 goto err1; 1419 else if (is_auth_only(ses)) 1420 ctx = build_auth_only_sg(op, ses); 1421 else if (is_cipher_only(ses)) 1422 ctx = build_cipher_only_sg(op, ses); 1423 } 1424 err1: 1425 if (unlikely(!ctx)) { 1426 qp->tx_errs++; 1427 CAAM_JR_ERR("not supported sec op"); 1428 return -1; 1429 } 1430 #if CAAM_JR_DBG 1431 if (is_decode(ses)) 1432 rte_hexdump(stdout, "DECODE", 1433 rte_pktmbuf_mtod(op->sym->m_src, void *), 1434 rte_pktmbuf_data_len(op->sym->m_src)); 1435 else 1436 rte_hexdump(stdout, "ENCODE", 1437 rte_pktmbuf_mtod(op->sym->m_src, void *), 1438 rte_pktmbuf_data_len(op->sym->m_src)); 1439 1440 printf("\n JD before conversion\n"); 1441 for (int i = 0; i < 12; i++) 1442 printf("\n 0x%08x", ctx->jobdes.desc[i]); 1443 #endif 1444 1445 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc", 1446 ring, ring->pidx, ring->cidx); 1447 1448 /* todo - do we want to retry */ 1449 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx, 1450 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) { 1451 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d", 1452 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE); 1453 caam_jr_op_ending(ctx); 1454 qp->tx_ring_full++; 1455 return -EBUSY; 1456 } 1457 1458 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER 1459 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc; 1460 1461 jobdescr->deschdr.command.word = 1462 cpu_to_caam32(jobdescr->deschdr.command.word); 1463 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr); 1464 jobdescr->seq_out.command.word = 1465 cpu_to_caam32(jobdescr->seq_out.command.word); 1466 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr); 1467 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length); 1468 jobdescr->seq_in.command.word = 1469 cpu_to_caam32(jobdescr->seq_in.command.word); 1470 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr); 1471 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length); 1472 jobdescr->load_dpovrd.command.word = 1473 cpu_to_caam32(jobdescr->load_dpovrd.command.word); 1474 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd); 1475 #endif 1476 1477 /* Set ptr in input ring to current descriptor */ 1478 sec_write_addr(&ring->input_ring[ring->pidx], 1479 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc)); 1480 rte_smp_wmb(); 1481 1482 /* Notify HW that a new job is enqueued */ 1483 hw_enqueue_desc_on_job_ring(ring); 1484 1485 /* increment the producer index for the current job ring */ 1486 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE); 1487 1488 return 0; 1489 } 1490 1491 static uint16_t 1492 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops, 1493 uint16_t nb_ops) 1494 { 1495 /* Function to transmit the frames to given device and queuepair */ 1496 uint32_t loop; 1497 int32_t ret; 1498 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp; 1499 uint16_t num_tx = 0; 1500 1501 PMD_INIT_FUNC_TRACE(); 1502 /*Prepare each packet which is to be sent*/ 1503 for (loop = 0; loop < nb_ops; loop++) { 1504 ret = caam_jr_enqueue_op(ops[loop], jr_qp); 1505 if (!ret) 1506 num_tx++; 1507 } 1508 1509 jr_qp->tx_pkts += num_tx; 1510 1511 return num_tx; 1512 } 1513 1514 /* Release queue pair */ 1515 static int 1516 caam_jr_queue_pair_release(struct rte_cryptodev *dev, 1517 uint16_t qp_id) 1518 { 1519 struct sec_job_ring_t *internals; 1520 struct caam_jr_qp *qp = NULL; 1521 1522 PMD_INIT_FUNC_TRACE(); 1523 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id); 1524 1525 internals = dev->data->dev_private; 1526 if (qp_id >= internals->max_nb_queue_pairs) { 1527 CAAM_JR_ERR("Max supported qpid %d", 1528 internals->max_nb_queue_pairs); 1529 return -EINVAL; 1530 } 1531 1532 qp = &internals->qps[qp_id]; 1533 qp->ring = NULL; 1534 dev->data->queue_pairs[qp_id] = NULL; 1535 1536 return 0; 1537 } 1538 1539 /* Setup a queue pair */ 1540 static int 1541 caam_jr_queue_pair_setup( 1542 struct rte_cryptodev *dev, uint16_t qp_id, 1543 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf, 1544 __rte_unused int socket_id) 1545 { 1546 struct sec_job_ring_t *internals; 1547 struct caam_jr_qp *qp = NULL; 1548 1549 PMD_INIT_FUNC_TRACE(); 1550 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); 1551 1552 internals = dev->data->dev_private; 1553 if (qp_id >= internals->max_nb_queue_pairs) { 1554 CAAM_JR_ERR("Max supported qpid %d", 1555 internals->max_nb_queue_pairs); 1556 return -EINVAL; 1557 } 1558 1559 qp = &internals->qps[qp_id]; 1560 qp->ring = internals; 1561 dev->data->queue_pairs[qp_id] = qp; 1562 1563 return 0; 1564 } 1565 1566 /* Return the number of allocated queue pairs */ 1567 static uint32_t 1568 caam_jr_queue_pair_count(struct rte_cryptodev *dev) 1569 { 1570 PMD_INIT_FUNC_TRACE(); 1571 1572 return dev->data->nb_queue_pairs; 1573 } 1574 1575 /* Returns the size of the aesni gcm session structure */ 1576 static unsigned int 1577 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 1578 { 1579 PMD_INIT_FUNC_TRACE(); 1580 1581 return sizeof(struct caam_jr_session); 1582 } 1583 1584 static int 1585 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused, 1586 struct rte_crypto_sym_xform *xform, 1587 struct caam_jr_session *session) 1588 { 1589 PMD_INIT_FUNC_TRACE(); 1590 session->cipher_alg = xform->cipher.algo; 1591 session->iv.length = xform->cipher.iv.length; 1592 session->iv.offset = xform->cipher.iv.offset; 1593 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, 1594 RTE_CACHE_LINE_SIZE); 1595 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { 1596 CAAM_JR_ERR("No Memory for cipher key\n"); 1597 return -ENOMEM; 1598 } 1599 session->cipher_key.length = xform->cipher.key.length; 1600 1601 memcpy(session->cipher_key.data, xform->cipher.key.data, 1602 xform->cipher.key.length); 1603 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? 1604 DIR_ENC : DIR_DEC; 1605 1606 return 0; 1607 } 1608 1609 static int 1610 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused, 1611 struct rte_crypto_sym_xform *xform, 1612 struct caam_jr_session *session) 1613 { 1614 PMD_INIT_FUNC_TRACE(); 1615 session->auth_alg = xform->auth.algo; 1616 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, 1617 RTE_CACHE_LINE_SIZE); 1618 if (session->auth_key.data == NULL && xform->auth.key.length > 0) { 1619 CAAM_JR_ERR("No Memory for auth key\n"); 1620 return -ENOMEM; 1621 } 1622 session->auth_key.length = xform->auth.key.length; 1623 session->digest_length = xform->auth.digest_length; 1624 1625 memcpy(session->auth_key.data, xform->auth.key.data, 1626 xform->auth.key.length); 1627 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 1628 DIR_ENC : DIR_DEC; 1629 1630 return 0; 1631 } 1632 1633 static int 1634 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused, 1635 struct rte_crypto_sym_xform *xform, 1636 struct caam_jr_session *session) 1637 { 1638 PMD_INIT_FUNC_TRACE(); 1639 session->aead_alg = xform->aead.algo; 1640 session->iv.length = xform->aead.iv.length; 1641 session->iv.offset = xform->aead.iv.offset; 1642 session->auth_only_len = xform->aead.aad_length; 1643 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, 1644 RTE_CACHE_LINE_SIZE); 1645 if (session->aead_key.data == NULL && xform->aead.key.length > 0) { 1646 CAAM_JR_ERR("No Memory for aead key\n"); 1647 return -ENOMEM; 1648 } 1649 session->aead_key.length = xform->aead.key.length; 1650 session->digest_length = xform->aead.digest_length; 1651 1652 memcpy(session->aead_key.data, xform->aead.key.data, 1653 xform->aead.key.length); 1654 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 1655 DIR_ENC : DIR_DEC; 1656 1657 return 0; 1658 } 1659 1660 static int 1661 caam_jr_set_session_parameters(struct rte_cryptodev *dev, 1662 struct rte_crypto_sym_xform *xform, void *sess) 1663 { 1664 struct sec_job_ring_t *internals = dev->data->dev_private; 1665 struct caam_jr_session *session = sess; 1666 1667 PMD_INIT_FUNC_TRACE(); 1668 1669 if (unlikely(sess == NULL)) { 1670 CAAM_JR_ERR("invalid session struct"); 1671 return -EINVAL; 1672 } 1673 1674 /* Default IV length = 0 */ 1675 session->iv.length = 0; 1676 1677 /* Cipher Only */ 1678 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { 1679 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 1680 caam_jr_cipher_init(dev, xform, session); 1681 1682 /* Authentication Only */ 1683 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 1684 xform->next == NULL) { 1685 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; 1686 caam_jr_auth_init(dev, xform, session); 1687 1688 /* Cipher then Authenticate */ 1689 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 1690 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { 1691 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { 1692 caam_jr_cipher_init(dev, xform, session); 1693 caam_jr_auth_init(dev, xform->next, session); 1694 } else { 1695 CAAM_JR_ERR("Not supported: Auth then Cipher"); 1696 goto err1; 1697 } 1698 1699 /* Authenticate then Cipher */ 1700 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && 1701 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { 1702 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) { 1703 caam_jr_auth_init(dev, xform, session); 1704 caam_jr_cipher_init(dev, xform->next, session); 1705 } else { 1706 CAAM_JR_ERR("Not supported: Auth then Cipher"); 1707 goto err1; 1708 } 1709 1710 /* AEAD operation for AES-GCM kind of Algorithms */ 1711 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && 1712 xform->next == NULL) { 1713 caam_jr_aead_init(dev, xform, session); 1714 1715 } else { 1716 CAAM_JR_ERR("Invalid crypto type"); 1717 return -EINVAL; 1718 } 1719 session->ctx_pool = internals->ctx_pool; 1720 1721 return 0; 1722 1723 err1: 1724 rte_free(session->cipher_key.data); 1725 rte_free(session->auth_key.data); 1726 memset(session, 0, sizeof(struct caam_jr_session)); 1727 1728 return -EINVAL; 1729 } 1730 1731 static int 1732 caam_jr_sym_session_configure(struct rte_cryptodev *dev, 1733 struct rte_crypto_sym_xform *xform, 1734 struct rte_cryptodev_sym_session *sess, 1735 struct rte_mempool *mempool) 1736 { 1737 void *sess_private_data; 1738 int ret; 1739 1740 PMD_INIT_FUNC_TRACE(); 1741 1742 if (rte_mempool_get(mempool, &sess_private_data)) { 1743 CAAM_JR_ERR("Couldn't get object from session mempool"); 1744 return -ENOMEM; 1745 } 1746 1747 memset(sess_private_data, 0, sizeof(struct caam_jr_session)); 1748 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data); 1749 if (ret != 0) { 1750 CAAM_JR_ERR("failed to configure session parameters"); 1751 /* Return session to mempool */ 1752 rte_mempool_put(mempool, sess_private_data); 1753 return ret; 1754 } 1755 1756 set_sym_session_private_data(sess, dev->driver_id, sess_private_data); 1757 1758 return 0; 1759 } 1760 1761 /* Clear the memory of session so it doesn't leave key material behind */ 1762 static void 1763 caam_jr_sym_session_clear(struct rte_cryptodev *dev, 1764 struct rte_cryptodev_sym_session *sess) 1765 { 1766 uint8_t index = dev->driver_id; 1767 void *sess_priv = get_sym_session_private_data(sess, index); 1768 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv; 1769 1770 PMD_INIT_FUNC_TRACE(); 1771 1772 if (sess_priv) { 1773 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 1774 1775 rte_free(s->cipher_key.data); 1776 rte_free(s->auth_key.data); 1777 memset(s, 0, sizeof(struct caam_jr_session)); 1778 set_sym_session_private_data(sess, index, NULL); 1779 rte_mempool_put(sess_mp, sess_priv); 1780 } 1781 } 1782 1783 static int 1784 caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, 1785 struct rte_security_session_conf *conf, 1786 void *sess) 1787 { 1788 struct sec_job_ring_t *internals = dev->data->dev_private; 1789 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; 1790 struct rte_crypto_auth_xform *auth_xform; 1791 struct rte_crypto_cipher_xform *cipher_xform; 1792 struct caam_jr_session *session = (struct caam_jr_session *)sess; 1793 1794 PMD_INIT_FUNC_TRACE(); 1795 1796 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1797 cipher_xform = &conf->crypto_xform->cipher; 1798 auth_xform = &conf->crypto_xform->next->auth; 1799 } else { 1800 auth_xform = &conf->crypto_xform->auth; 1801 cipher_xform = &conf->crypto_xform->next->cipher; 1802 } 1803 session->proto_alg = conf->protocol; 1804 session->cipher_key.data = rte_zmalloc(NULL, 1805 cipher_xform->key.length, 1806 RTE_CACHE_LINE_SIZE); 1807 if (session->cipher_key.data == NULL && 1808 cipher_xform->key.length > 0) { 1809 CAAM_JR_ERR("No Memory for cipher key\n"); 1810 return -ENOMEM; 1811 } 1812 1813 session->cipher_key.length = cipher_xform->key.length; 1814 session->auth_key.data = rte_zmalloc(NULL, 1815 auth_xform->key.length, 1816 RTE_CACHE_LINE_SIZE); 1817 if (session->auth_key.data == NULL && 1818 auth_xform->key.length > 0) { 1819 CAAM_JR_ERR("No Memory for auth key\n"); 1820 rte_free(session->cipher_key.data); 1821 return -ENOMEM; 1822 } 1823 session->auth_key.length = auth_xform->key.length; 1824 memcpy(session->cipher_key.data, cipher_xform->key.data, 1825 cipher_xform->key.length); 1826 memcpy(session->auth_key.data, auth_xform->key.data, 1827 auth_xform->key.length); 1828 1829 switch (auth_xform->algo) { 1830 case RTE_CRYPTO_AUTH_SHA1_HMAC: 1831 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; 1832 break; 1833 case RTE_CRYPTO_AUTH_MD5_HMAC: 1834 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; 1835 break; 1836 case RTE_CRYPTO_AUTH_SHA256_HMAC: 1837 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; 1838 break; 1839 case RTE_CRYPTO_AUTH_SHA384_HMAC: 1840 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; 1841 break; 1842 case RTE_CRYPTO_AUTH_SHA512_HMAC: 1843 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; 1844 break; 1845 case RTE_CRYPTO_AUTH_AES_CMAC: 1846 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; 1847 break; 1848 case RTE_CRYPTO_AUTH_NULL: 1849 session->auth_alg = RTE_CRYPTO_AUTH_NULL; 1850 break; 1851 case RTE_CRYPTO_AUTH_SHA224_HMAC: 1852 case RTE_CRYPTO_AUTH_AES_XCBC_MAC: 1853 case RTE_CRYPTO_AUTH_SNOW3G_UIA2: 1854 case RTE_CRYPTO_AUTH_SHA1: 1855 case RTE_CRYPTO_AUTH_SHA256: 1856 case RTE_CRYPTO_AUTH_SHA512: 1857 case RTE_CRYPTO_AUTH_SHA224: 1858 case RTE_CRYPTO_AUTH_SHA384: 1859 case RTE_CRYPTO_AUTH_MD5: 1860 case RTE_CRYPTO_AUTH_AES_GMAC: 1861 case RTE_CRYPTO_AUTH_KASUMI_F9: 1862 case RTE_CRYPTO_AUTH_AES_CBC_MAC: 1863 case RTE_CRYPTO_AUTH_ZUC_EIA3: 1864 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n", 1865 auth_xform->algo); 1866 goto out; 1867 default: 1868 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n", 1869 auth_xform->algo); 1870 goto out; 1871 } 1872 1873 switch (cipher_xform->algo) { 1874 case RTE_CRYPTO_CIPHER_AES_CBC: 1875 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; 1876 break; 1877 case RTE_CRYPTO_CIPHER_3DES_CBC: 1878 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; 1879 break; 1880 case RTE_CRYPTO_CIPHER_AES_CTR: 1881 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; 1882 break; 1883 case RTE_CRYPTO_CIPHER_NULL: 1884 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: 1885 case RTE_CRYPTO_CIPHER_3DES_ECB: 1886 case RTE_CRYPTO_CIPHER_AES_ECB: 1887 case RTE_CRYPTO_CIPHER_KASUMI_F8: 1888 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n", 1889 cipher_xform->algo); 1890 goto out; 1891 default: 1892 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n", 1893 cipher_xform->algo); 1894 goto out; 1895 } 1896 1897 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1898 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) + 1899 sizeof(session->ip4_hdr)); 1900 session->ip4_hdr.ip_v = IPVERSION; 1901 session->ip4_hdr.ip_hl = 5; 1902 session->ip4_hdr.ip_len = rte_cpu_to_be_16( 1903 sizeof(session->ip4_hdr)); 1904 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; 1905 session->ip4_hdr.ip_id = 0; 1906 session->ip4_hdr.ip_off = 0; 1907 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; 1908 session->ip4_hdr.ip_p = (ipsec_xform->proto == 1909 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP 1910 : IPPROTO_AH; 1911 session->ip4_hdr.ip_sum = 0; 1912 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; 1913 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip; 1914 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *) 1915 (void *)&session->ip4_hdr, 1916 sizeof(struct ip)); 1917 1918 session->encap_pdb.options = 1919 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | 1920 PDBOPTS_ESP_OIHI_PDB_INL | 1921 PDBOPTS_ESP_IVSRC | 1922 PDBHMO_ESP_ENCAP_DTTL; 1923 session->encap_pdb.spi = ipsec_xform->spi; 1924 session->encap_pdb.ip_hdr_len = sizeof(struct ip); 1925 1926 session->dir = DIR_ENC; 1927 } else if (ipsec_xform->direction == 1928 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 1929 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb)); 1930 session->decap_pdb.options = sizeof(struct ip) << 16; 1931 session->dir = DIR_DEC; 1932 } else 1933 goto out; 1934 session->ctx_pool = internals->ctx_pool; 1935 1936 return 0; 1937 out: 1938 rte_free(session->auth_key.data); 1939 rte_free(session->cipher_key.data); 1940 memset(session, 0, sizeof(struct caam_jr_session)); 1941 return -1; 1942 } 1943 1944 static int 1945 caam_jr_security_session_create(void *dev, 1946 struct rte_security_session_conf *conf, 1947 struct rte_security_session *sess, 1948 struct rte_mempool *mempool) 1949 { 1950 void *sess_private_data; 1951 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; 1952 int ret; 1953 1954 PMD_INIT_FUNC_TRACE(); 1955 if (rte_mempool_get(mempool, &sess_private_data)) { 1956 CAAM_JR_ERR("Couldn't get object from session mempool"); 1957 return -ENOMEM; 1958 } 1959 1960 switch (conf->protocol) { 1961 case RTE_SECURITY_PROTOCOL_IPSEC: 1962 ret = caam_jr_set_ipsec_session(cdev, conf, 1963 sess_private_data); 1964 break; 1965 case RTE_SECURITY_PROTOCOL_MACSEC: 1966 return -ENOTSUP; 1967 default: 1968 return -EINVAL; 1969 } 1970 if (ret != 0) { 1971 CAAM_JR_ERR("failed to configure session parameters"); 1972 /* Return session to mempool */ 1973 rte_mempool_put(mempool, sess_private_data); 1974 return ret; 1975 } 1976 1977 set_sec_session_private_data(sess, sess_private_data); 1978 1979 return ret; 1980 } 1981 1982 /* Clear the memory of session so it doesn't leave key material behind */ 1983 static int 1984 caam_jr_security_session_destroy(void *dev __rte_unused, 1985 struct rte_security_session *sess) 1986 { 1987 PMD_INIT_FUNC_TRACE(); 1988 void *sess_priv = get_sec_session_private_data(sess); 1989 1990 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv; 1991 1992 if (sess_priv) { 1993 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); 1994 1995 rte_free(s->cipher_key.data); 1996 rte_free(s->auth_key.data); 1997 memset(sess, 0, sizeof(struct caam_jr_session)); 1998 set_sec_session_private_data(sess, NULL); 1999 rte_mempool_put(sess_mp, sess_priv); 2000 } 2001 return 0; 2002 } 2003 2004 2005 static int 2006 caam_jr_dev_configure(struct rte_cryptodev *dev, 2007 struct rte_cryptodev_config *config __rte_unused) 2008 { 2009 char str[20]; 2010 struct sec_job_ring_t *internals; 2011 2012 PMD_INIT_FUNC_TRACE(); 2013 2014 internals = dev->data->dev_private; 2015 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id); 2016 if (!internals->ctx_pool) { 2017 internals->ctx_pool = rte_mempool_create((const char *)str, 2018 CTX_POOL_NUM_BUFS, 2019 sizeof(struct caam_jr_op_ctx), 2020 CTX_POOL_CACHE_SIZE, 0, 2021 NULL, NULL, NULL, NULL, 2022 SOCKET_ID_ANY, 0); 2023 if (!internals->ctx_pool) { 2024 CAAM_JR_ERR("%s create failed\n", str); 2025 return -ENOMEM; 2026 } 2027 } else 2028 CAAM_JR_INFO("mempool already created for dev_id : %d", 2029 dev->data->dev_id); 2030 2031 return 0; 2032 } 2033 2034 static int 2035 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused) 2036 { 2037 PMD_INIT_FUNC_TRACE(); 2038 return 0; 2039 } 2040 2041 static void 2042 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused) 2043 { 2044 PMD_INIT_FUNC_TRACE(); 2045 } 2046 2047 static int 2048 caam_jr_dev_close(struct rte_cryptodev *dev) 2049 { 2050 struct sec_job_ring_t *internals; 2051 2052 PMD_INIT_FUNC_TRACE(); 2053 2054 if (dev == NULL) 2055 return -ENOMEM; 2056 2057 internals = dev->data->dev_private; 2058 rte_mempool_free(internals->ctx_pool); 2059 internals->ctx_pool = NULL; 2060 2061 return 0; 2062 } 2063 2064 static void 2065 caam_jr_dev_infos_get(struct rte_cryptodev *dev, 2066 struct rte_cryptodev_info *info) 2067 { 2068 struct sec_job_ring_t *internals = dev->data->dev_private; 2069 2070 PMD_INIT_FUNC_TRACE(); 2071 if (info != NULL) { 2072 info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 2073 info->feature_flags = dev->feature_flags; 2074 info->capabilities = caam_jr_get_cryptodev_capabilities(); 2075 info->sym.max_nb_sessions = internals->max_nb_sessions; 2076 info->driver_id = cryptodev_driver_id; 2077 } 2078 } 2079 2080 static struct rte_cryptodev_ops caam_jr_ops = { 2081 .dev_configure = caam_jr_dev_configure, 2082 .dev_start = caam_jr_dev_start, 2083 .dev_stop = caam_jr_dev_stop, 2084 .dev_close = caam_jr_dev_close, 2085 .dev_infos_get = caam_jr_dev_infos_get, 2086 .stats_get = caam_jr_stats_get, 2087 .stats_reset = caam_jr_stats_reset, 2088 .queue_pair_setup = caam_jr_queue_pair_setup, 2089 .queue_pair_release = caam_jr_queue_pair_release, 2090 .queue_pair_count = caam_jr_queue_pair_count, 2091 .sym_session_get_size = caam_jr_sym_session_get_size, 2092 .sym_session_configure = caam_jr_sym_session_configure, 2093 .sym_session_clear = caam_jr_sym_session_clear 2094 }; 2095 2096 static struct rte_security_ops caam_jr_security_ops = { 2097 .session_create = caam_jr_security_session_create, 2098 .session_update = NULL, 2099 .session_stats_get = NULL, 2100 .session_destroy = caam_jr_security_session_destroy, 2101 .set_pkt_metadata = NULL, 2102 .capabilities_get = caam_jr_get_security_capabilities 2103 }; 2104 2105 /* @brief Flush job rings of any processed descs. 2106 * The processed descs are silently dropped, 2107 * WITHOUT being notified to UA. 2108 */ 2109 static void 2110 close_job_ring(struct sec_job_ring_t *job_ring) 2111 { 2112 PMD_INIT_FUNC_TRACE(); 2113 if (job_ring->irq_fd) { 2114 /* Producer index is frozen. If consumer index is not equal 2115 * with producer index, then we have descs to flush. 2116 */ 2117 while (job_ring->pidx != job_ring->cidx) 2118 hw_flush_job_ring(job_ring, false, NULL); 2119 2120 /* free the uio job ring */ 2121 free_job_ring(job_ring->irq_fd); 2122 job_ring->irq_fd = 0; 2123 caam_jr_dma_free(job_ring->input_ring); 2124 caam_jr_dma_free(job_ring->output_ring); 2125 g_job_rings_no--; 2126 } 2127 } 2128 2129 /** @brief Release the software and hardware resources tied to a job ring. 2130 * @param [in] job_ring The job ring 2131 * 2132 * @retval 0 for success 2133 * @retval -1 for error 2134 */ 2135 static int 2136 shutdown_job_ring(struct sec_job_ring_t *job_ring) 2137 { 2138 int ret = 0; 2139 2140 PMD_INIT_FUNC_TRACE(); 2141 ASSERT(job_ring != NULL); 2142 ret = hw_shutdown_job_ring(job_ring); 2143 SEC_ASSERT(ret == 0, ret, 2144 "Failed to shutdown hardware job ring %p", 2145 job_ring); 2146 2147 if (job_ring->coalescing_en) 2148 hw_job_ring_disable_coalescing(job_ring); 2149 2150 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) { 2151 ret = caam_jr_disable_irqs(job_ring->irq_fd); 2152 SEC_ASSERT(ret == 0, ret, 2153 "Failed to disable irqs for job ring %p", 2154 job_ring); 2155 } 2156 2157 return ret; 2158 } 2159 2160 /* 2161 * @brief Release the resources used by the SEC user space driver. 2162 * 2163 * Reset and release SEC's job rings indicated by the User Application at 2164 * init_job_ring() and free any memory allocated internally. 2165 * Call once during application tear down. 2166 * 2167 * @note In case there are any descriptors in-flight (descriptors received by 2168 * SEC driver for processing and for which no response was yet provided to UA), 2169 * the descriptors are discarded without any notifications to User Application. 2170 * 2171 * @retval ::0 is returned for a successful execution 2172 * @retval ::-1 is returned if SEC driver release is in progress 2173 */ 2174 static int 2175 caam_jr_dev_uninit(struct rte_cryptodev *dev) 2176 { 2177 struct sec_job_ring_t *internals; 2178 2179 PMD_INIT_FUNC_TRACE(); 2180 if (dev == NULL) 2181 return -ENODEV; 2182 2183 internals = dev->data->dev_private; 2184 rte_free(dev->security_ctx); 2185 2186 /* If any descriptors in flight , poll and wait 2187 * until all descriptors are received and silently discarded. 2188 */ 2189 if (internals) { 2190 shutdown_job_ring(internals); 2191 close_job_ring(internals); 2192 rte_mempool_free(internals->ctx_pool); 2193 } 2194 2195 CAAM_JR_INFO("Closing crypto device %s", dev->data->name); 2196 2197 /* last caam jr instance) */ 2198 if (g_job_rings_no == 0) 2199 g_driver_state = SEC_DRIVER_STATE_IDLE; 2200 2201 return SEC_SUCCESS; 2202 } 2203 2204 /* @brief Initialize the software and hardware resources tied to a job ring. 2205 * @param [in] jr_mode; Model to be used by SEC Driver to receive 2206 * notifications from SEC. Can be either 2207 * of the three: #SEC_NOTIFICATION_TYPE_NAPI 2208 * #SEC_NOTIFICATION_TYPE_IRQ or 2209 * #SEC_NOTIFICATION_TYPE_POLL 2210 * @param [in] NAPI_mode The NAPI work mode to configure a job ring at 2211 * startup. Used only when #SEC_NOTIFICATION_TYPE 2212 * is set to #SEC_NOTIFICATION_TYPE_NAPI. 2213 * @param [in] irq_coalescing_timer This value determines the maximum 2214 * amount of time after processing a 2215 * descriptor before raising an interrupt. 2216 * @param [in] irq_coalescing_count This value determines how many 2217 * descriptors are completed before 2218 * raising an interrupt. 2219 * @param [in] reg_base_addr, The job ring base address register 2220 * @param [in] irq_id The job ring interrupt identification number. 2221 * @retval job_ring_handle for successful job ring configuration 2222 * @retval NULL on error 2223 * 2224 */ 2225 static void * 2226 init_job_ring(void *reg_base_addr, uint32_t irq_id) 2227 { 2228 struct sec_job_ring_t *job_ring = NULL; 2229 int i, ret = 0; 2230 int jr_mode = SEC_NOTIFICATION_TYPE_POLL; 2231 int napi_mode = 0; 2232 int irq_coalescing_timer = 0; 2233 int irq_coalescing_count = 0; 2234 2235 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) { 2236 if (g_job_rings[i].irq_fd == 0) { 2237 job_ring = &g_job_rings[i]; 2238 g_job_rings_no++; 2239 break; 2240 } 2241 } 2242 if (job_ring == NULL) { 2243 CAAM_JR_ERR("No free job ring\n"); 2244 return NULL; 2245 } 2246 2247 job_ring->register_base_addr = reg_base_addr; 2248 job_ring->jr_mode = jr_mode; 2249 job_ring->napi_mode = 0; 2250 job_ring->irq_fd = irq_id; 2251 2252 /* Allocate mem for input and output ring */ 2253 2254 /* Allocate memory for input ring */ 2255 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, 2256 SEC_DMA_MEM_INPUT_RING_SIZE); 2257 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE); 2258 2259 /* Allocate memory for output ring */ 2260 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, 2261 SEC_DMA_MEM_OUTPUT_RING_SIZE); 2262 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE); 2263 2264 /* Reset job ring in SEC hw and configure job ring registers */ 2265 ret = hw_reset_job_ring(job_ring); 2266 if (ret != 0) { 2267 CAAM_JR_ERR("Failed to reset hardware job ring"); 2268 goto cleanup; 2269 } 2270 2271 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) { 2272 /* When SEC US driver works in NAPI mode, the UA can select 2273 * if the driver starts with IRQs on or off. 2274 */ 2275 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) { 2276 CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p", 2277 job_ring); 2278 ret = caam_jr_enable_irqs(job_ring->irq_fd); 2279 if (ret != 0) { 2280 CAAM_JR_ERR("Failed to enable irqs for job ring"); 2281 goto cleanup; 2282 } 2283 } 2284 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) { 2285 /* When SEC US driver works in pure interrupt mode, 2286 * IRQ's are always enabled. 2287 */ 2288 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p", 2289 job_ring); 2290 ret = caam_jr_enable_irqs(job_ring->irq_fd); 2291 if (ret != 0) { 2292 CAAM_JR_ERR("Failed to enable irqs for job ring"); 2293 goto cleanup; 2294 } 2295 } 2296 if (irq_coalescing_timer || irq_coalescing_count) { 2297 hw_job_ring_set_coalescing_param(job_ring, 2298 irq_coalescing_timer, 2299 irq_coalescing_count); 2300 2301 hw_job_ring_enable_coalescing(job_ring); 2302 job_ring->coalescing_en = 1; 2303 } 2304 2305 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED; 2306 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS; 2307 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS; 2308 2309 return job_ring; 2310 cleanup: 2311 caam_jr_dma_free(job_ring->output_ring); 2312 caam_jr_dma_free(job_ring->input_ring); 2313 return NULL; 2314 } 2315 2316 2317 static int 2318 caam_jr_dev_init(const char *name, 2319 struct rte_vdev_device *vdev, 2320 struct rte_cryptodev_pmd_init_params *init_params) 2321 { 2322 struct rte_cryptodev *dev; 2323 struct rte_security_ctx *security_instance; 2324 struct uio_job_ring *job_ring; 2325 char str[RTE_CRYPTODEV_NAME_MAX_LEN]; 2326 2327 PMD_INIT_FUNC_TRACE(); 2328 2329 /* Validate driver state */ 2330 if (g_driver_state == SEC_DRIVER_STATE_IDLE) { 2331 g_job_rings_max = sec_configure(); 2332 if (!g_job_rings_max) { 2333 CAAM_JR_ERR("No job ring detected on UIO !!!!"); 2334 return -1; 2335 } 2336 /* Update driver state */ 2337 g_driver_state = SEC_DRIVER_STATE_STARTED; 2338 } 2339 2340 if (g_job_rings_no >= g_job_rings_max) { 2341 CAAM_JR_ERR("No more job rings available max=%d!!!!", 2342 g_job_rings_max); 2343 return -1; 2344 } 2345 2346 job_ring = config_job_ring(); 2347 if (job_ring == NULL) { 2348 CAAM_JR_ERR("failed to create job ring"); 2349 goto init_error; 2350 } 2351 2352 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id); 2353 2354 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); 2355 if (dev == NULL) { 2356 CAAM_JR_ERR("failed to create cryptodev vdev"); 2357 goto cleanup; 2358 } 2359 /*TODO free it during teardown*/ 2360 dev->data->dev_private = init_job_ring(job_ring->register_base_addr, 2361 job_ring->uio_fd); 2362 2363 if (!dev->data->dev_private) { 2364 CAAM_JR_ERR("Ring memory allocation failed\n"); 2365 goto cleanup2; 2366 } 2367 2368 dev->driver_id = cryptodev_driver_id; 2369 dev->dev_ops = &caam_jr_ops; 2370 2371 /* register rx/tx burst functions for data path */ 2372 dev->dequeue_burst = caam_jr_dequeue_burst; 2373 dev->enqueue_burst = caam_jr_enqueue_burst; 2374 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | 2375 RTE_CRYPTODEV_FF_HW_ACCELERATED | 2376 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | 2377 RTE_CRYPTODEV_FF_SECURITY | 2378 RTE_CRYPTODEV_FF_IN_PLACE_SGL | 2379 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | 2380 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | 2381 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | 2382 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; 2383 2384 /* For secondary processes, we don't initialise any further as primary 2385 * has already done this work. Only check we don't need a different 2386 * RX function 2387 */ 2388 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2389 CAAM_JR_WARN("Device already init by primary process"); 2390 return 0; 2391 } 2392 2393 /*TODO free it during teardown*/ 2394 security_instance = rte_malloc("caam_jr", 2395 sizeof(struct rte_security_ctx), 0); 2396 if (security_instance == NULL) { 2397 CAAM_JR_ERR("memory allocation failed\n"); 2398 //todo error handling. 2399 goto cleanup2; 2400 } 2401 2402 security_instance->device = (void *)dev; 2403 security_instance->ops = &caam_jr_security_ops; 2404 security_instance->sess_cnt = 0; 2405 dev->security_ctx = security_instance; 2406 2407 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name); 2408 2409 return 0; 2410 2411 cleanup2: 2412 caam_jr_dev_uninit(dev); 2413 rte_cryptodev_pmd_release_device(dev); 2414 cleanup: 2415 free_job_ring(job_ring->uio_fd); 2416 init_error: 2417 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed", 2418 init_params->name); 2419 2420 return -ENXIO; 2421 } 2422 2423 /** Initialise CAAM JR crypto device */ 2424 static int 2425 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev) 2426 { 2427 struct rte_cryptodev_pmd_init_params init_params = { 2428 "", 2429 sizeof(struct sec_job_ring_t), 2430 rte_socket_id(), 2431 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS 2432 }; 2433 const char *name; 2434 const char *input_args; 2435 2436 name = rte_vdev_device_name(vdev); 2437 if (name == NULL) 2438 return -EINVAL; 2439 2440 input_args = rte_vdev_device_args(vdev); 2441 rte_cryptodev_pmd_parse_input_args(&init_params, input_args); 2442 2443 /* if sec device version is not configured */ 2444 if (!rta_get_sec_era()) { 2445 const struct device_node *caam_node; 2446 2447 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { 2448 const uint32_t *prop = of_get_property(caam_node, 2449 "fsl,sec-era", 2450 NULL); 2451 if (prop) { 2452 rta_set_sec_era( 2453 INTL_SEC_ERA(cpu_to_caam32(*prop))); 2454 break; 2455 } 2456 } 2457 } 2458 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE 2459 if (rta_get_sec_era() > RTA_SEC_ERA_8) { 2460 RTE_LOG(ERR, PMD, 2461 "CAAM is compiled in BE mode for device with sec era > 8???\n"); 2462 return -EINVAL; 2463 } 2464 #endif 2465 2466 return caam_jr_dev_init(name, vdev, &init_params); 2467 } 2468 2469 /** Uninitialise CAAM JR crypto device */ 2470 static int 2471 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev) 2472 { 2473 struct rte_cryptodev *cryptodev; 2474 const char *name; 2475 2476 name = rte_vdev_device_name(vdev); 2477 if (name == NULL) 2478 return -EINVAL; 2479 2480 cryptodev = rte_cryptodev_pmd_get_named_dev(name); 2481 if (cryptodev == NULL) 2482 return -ENODEV; 2483 2484 caam_jr_dev_uninit(cryptodev); 2485 2486 return rte_cryptodev_pmd_destroy(cryptodev); 2487 } 2488 2489 static struct rte_vdev_driver cryptodev_caam_jr_drv = { 2490 .probe = cryptodev_caam_jr_probe, 2491 .remove = cryptodev_caam_jr_remove 2492 }; 2493 2494 static struct cryptodev_driver caam_jr_crypto_drv; 2495 2496 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv); 2497 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD, 2498 "max_nb_queue_pairs=<int>" 2499 "socket_id=<int>"); 2500 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver, 2501 cryptodev_driver_id); 2502 2503 RTE_INIT(caam_jr_init_log) 2504 { 2505 caam_jr_logtype = rte_log_register("pmd.crypto.caam"); 2506 if (caam_jr_logtype >= 0) 2507 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE); 2508 } 2509