/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_bitops.h>

#include "ionic_crypto.h"

static int
iocpt_cq_init(struct iocpt_cq *cq, uint16_t num_descs)
{
	if (!rte_is_power_of_2(num_descs) ||
	    num_descs < IOCPT_MIN_RING_DESC ||
	    num_descs > IOCPT_MAX_RING_DESC) {
		IOCPT_PRINT(ERR, "%u descriptors (min: %u max: %u)",
			num_descs, IOCPT_MIN_RING_DESC, IOCPT_MAX_RING_DESC);
		return -EINVAL;
	}

	cq->num_descs = num_descs;
	cq->size_mask = num_descs - 1;
	cq->tail_idx = 0;
	cq->done_color = 1;

	return 0;
}

static void
iocpt_cq_reset(struct iocpt_cq *cq)
{
	cq->tail_idx = 0;
	cq->done_color = 1;

	memset(cq->base, 0, sizeof(struct iocpt_nop_comp) * cq->num_descs);
}

static void
iocpt_cq_map(struct iocpt_cq *cq, void *base, rte_iova_t base_pa)
{
	cq->base = base;
	cq->base_pa = base_pa;
}

uint32_t
iocpt_cq_service(struct iocpt_cq *cq, uint32_t work_to_do,
		iocpt_cq_cb cb, void *cb_arg)
{
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return 0;

	while (cb(cq, cq->tail_idx, cb_arg)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		if (++work_done == work_to_do)
			break;
	}

	return work_done;
}

static int
iocpt_q_init(struct iocpt_queue *q, uint8_t type, uint32_t index,
		uint16_t num_descs, uint16_t num_segs, uint32_t socket_id)
{
	uint32_t ring_size;

	if (!rte_is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = rte_log2_u32(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->type = type;
	q->index = index;
	q->num_descs = num_descs;
	q->num_segs = num_segs;
	q->size_mask = num_descs - 1;
	q->head_idx = 0;
	q->tail_idx = 0;

	q->info = rte_calloc_socket("iocpt",
		(uint64_t)num_descs * num_segs, sizeof(void *),
		rte_mem_page_size(), socket_id);
	if (q->info == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue info");
		return -ENOMEM;
	}

	return 0;
}

static void
iocpt_q_reset(struct iocpt_queue *q)
{
	q->head_idx = 0;
	q->tail_idx = 0;
}

static void
iocpt_q_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->base = base;
	q->base_pa = base_pa;
}

static void
iocpt_q_sg_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->sg_base = base;
	q->sg_base_pa = base_pa;
}

static void
iocpt_q_free(struct iocpt_queue *q)
{
	if (q->info != NULL) {
		rte_free(q->info);
		q->info = NULL;
	}
}

static void
iocpt_get_abs_stats(const struct iocpt_dev *dev,
		struct rte_cryptodev_stats *stats)
{
	uint32_t i;

	memset(stats, 0, sizeof(*stats));

	/* Sum up the per-queue stats counters */
	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		struct rte_cryptodev_stats *q_stats = &dev->cryptoqs[i]->stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
}
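
/*
 * Stats pattern used below, as a sketch (illustrative only): the per-queue
 * counters are absolute; a software base snapshot makes reports relative.
 *
 *	iocpt_reset_stats(dev);		(base = current absolute counters)
 *	...				(datapath traffic runs)
 *	iocpt_get_stats(dev, &stats);	(reports absolute minus base)
 */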

void
iocpt_get_stats(const struct iocpt_dev *dev, struct rte_cryptodev_stats *stats)
{
	/* Retrieve the new absolute stats values */
	iocpt_get_abs_stats(dev, stats);

	/* Subtract the base stats values to get relative values */
	stats->enqueued_count -= dev->stats_base.enqueued_count;
	stats->dequeued_count -= dev->stats_base.dequeued_count;
	stats->enqueue_err_count -= dev->stats_base.enqueue_err_count;
	stats->dequeue_err_count -= dev->stats_base.dequeue_err_count;
}

void
iocpt_reset_stats(struct iocpt_dev *dev)
{
	uint32_t i;

	/* Erase the per-queue stats counters */
	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++)
		memset(&dev->cryptoqs[i]->stats, 0,
			sizeof(dev->cryptoqs[i]->stats));

	/* Update the base stats values */
	iocpt_get_abs_stats(dev, &dev->stats_base);
}

static int
iocpt_session_write(struct iocpt_session_priv *priv,
		enum iocpt_sess_control_oper oper)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = oper,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
			.key_seg_len = (uint8_t)RTE_MIN(priv->key_len,
						IOCPT_SESS_KEY_SEG_LEN),
		},
	};
	struct iocpt_sess_control_cmd *cmd = &ctx.cmd.sess_control;
	uint16_t key_offset;
	uint8_t key_segs, seg, seg_len;
	int err;

	key_segs = ((priv->key_len - 1) >> IOCPT_SESS_KEY_SEG_SHFT) + 1;

	for (seg = 0; seg < key_segs; seg++) {
		ctx.pending_work = true;

		key_offset = seg * cmd->key_seg_len;
		seg_len = (uint8_t)RTE_MIN(priv->key_len - key_offset,
					IOCPT_SESS_KEY_SEG_LEN);
		memcpy(cmd->key, &priv->key[key_offset], seg_len);
		cmd->key_seg_idx = seg;

		/* Mark final segment */
		if (seg + 1 == key_segs)
			cmd->flags |= rte_cpu_to_le_16(IOCPT_SCTL_F_END);

		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != 0)
			return err;
	}

	return 0;
}

static int
iocpt_session_wdog(struct iocpt_dev *dev)
{
	struct iocpt_session_priv priv = {
		.dev = dev,
		.index = IOCPT_Q_WDOG_SESS_IDX,
		.type = IOCPT_SESS_AEAD_AES_GCM,
		.key_len = IOCPT_Q_WDOG_KEY_LEN,
	};

	/* Reserve session 0 for queue watchdog */
	rte_bitmap_clear(dev->sess_bm, IOCPT_Q_WDOG_SESS_IDX);

	return iocpt_session_write(&priv, IOCPT_SESS_INIT);
}

int
iocpt_session_init(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	uint64_t bm_slab = 0;
	uint32_t bm_pos = 0;
	int err = 0;

	rte_spinlock_lock(&dev->adminq_lock);

	if (rte_bitmap_scan(dev->sess_bm, &bm_pos, &bm_slab) > 0) {
		priv->index = bm_pos + rte_ctz64(bm_slab);
		rte_bitmap_clear(dev->sess_bm, priv->index);
	} else {
		err = -ENOSPC;
	}

	rte_spinlock_unlock(&dev->adminq_lock);

	if (err != 0) {
		IOCPT_PRINT(ERR, "Session index space exhausted");
		return err;
	}

	err = iocpt_session_write(priv, IOCPT_SESS_INIT);
	if (err != 0) {
		rte_spinlock_lock(&dev->adminq_lock);
		rte_bitmap_set(dev->sess_bm, priv->index);
		rte_spinlock_unlock(&dev->adminq_lock);
		return err;
	}

	priv->flags |= IOCPT_S_F_INITED;

	return 0;
}

int
iocpt_session_update(struct iocpt_session_priv *priv)
{
	return iocpt_session_write(priv, IOCPT_SESS_UPDATE_KEY);
}
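
/*
 * Session lifecycle sketch (error handling omitted; the caller is assumed
 * to fill in priv->dev, priv->type, priv->key and priv->key_len first):
 *
 *	iocpt_session_init(priv);	(reserve an index, write the key)
 *	iocpt_session_update(priv);	(rewrite the key for the same index)
 *	iocpt_session_deinit(priv);	(disable session, release the index)
 */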

void
iocpt_session_deinit(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = IOCPT_SESS_DISABLE,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
		},
	};

	(void)iocpt_adminq_post_wait(dev, &ctx);

	rte_spinlock_lock(&dev->adminq_lock);
	rte_bitmap_set(dev->sess_bm, priv->index);
	rte_spinlock_unlock(&dev->adminq_lock);

	priv->flags &= ~IOCPT_S_F_INITED;
}

static const struct rte_memzone *
iocpt_dma_zone_reserve(const char *type_name, uint16_t qid, size_t size,
		unsigned int align, int socket_id)
{
	char zone_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err;

	err = snprintf(zone_name, sizeof(zone_name),
			"iocpt_%s_%u", type_name, qid);
	if (err >= RTE_MEMZONE_NAMESIZE) {
		IOCPT_PRINT(ERR, "Name %s too long", type_name);
		return NULL;
	}

	mz = rte_memzone_lookup(zone_name);
	if (mz != NULL)
		return mz;

	return rte_memzone_reserve_aligned(zone_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
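
/*
 * All rings for a queue are carved out of one IOVA-contiguous memzone.
 * Expected layout (sketch; each ring start is rounded up to a page):
 *
 *	base -> [ descriptor ring | pad ][ cq ring | pad ][ sg ring (opt) ]
 */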

static int
iocpt_commonq_alloc(struct iocpt_dev *dev,
		uint8_t type,
		size_t struct_size,
		uint32_t socket_id,
		uint32_t index,
		const char *type_name,
		uint16_t flags,
		uint16_t num_descs,
		uint16_t num_segs,
		uint16_t desc_size,
		uint16_t cq_desc_size,
		uint16_t sg_desc_size,
		struct iocpt_common_q **comq)
{
	struct iocpt_common_q *new;
	uint32_t q_size, cq_size, sg_size, total_size;
	void *q_base, *cq_base, *sg_base;
	rte_iova_t q_base_pa = 0;
	rte_iova_t cq_base_pa = 0;
	rte_iova_t sg_base_pa = 0;
	size_t page_size = rte_mem_page_size();
	int err;

	*comq = NULL;

	q_size = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	/*
	 * Note: aligning q_size/cq_size is not enough: q_base may not itself
	 * be page aligned, so the page-aligned cq_base/sg_base could overrun
	 * the zone. Add one page_size of slack per aligned region.
	 */
	total_size = RTE_ALIGN(q_size, page_size) +
		RTE_ALIGN(cq_size, page_size) + page_size;
	if (flags & IOCPT_Q_F_SG)
		total_size += RTE_ALIGN(sg_size, page_size) + page_size;

	new = rte_zmalloc_socket("iocpt", struct_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (new == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	new->dev = dev;

	err = iocpt_q_init(&new->q, type, index, num_descs, num_segs,
			socket_id);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Queue initialization failed");
		goto err_free_q;
	}

	err = iocpt_cq_init(&new->cq, num_descs);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Completion queue initialization failed");
		goto err_deinit_q;
	}

	new->base_z = iocpt_dma_zone_reserve(type_name, index, total_size,
			IONIC_ALIGN, socket_id);
	if (new->base_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot reserve queue DMA memory");
		err = -ENOMEM;
		goto err_deinit_cq;
	}

	new->base = new->base_z->addr;
	new->base_pa = new->base_z->iova;

	q_base = new->base;
	q_base_pa = new->base_pa;
	iocpt_q_map(&new->q, q_base, q_base_pa);

	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
	iocpt_cq_map(&new->cq, cq_base, cq_base_pa);

	if (flags & IOCPT_Q_F_SG) {
		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
				page_size);
		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
		iocpt_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	IOCPT_PRINT(DEBUG, "q_base_pa %#jx cq_base_pa %#jx sg_base_pa %#jx",
		q_base_pa, cq_base_pa, sg_base_pa);

	*comq = new;

	return 0;

err_deinit_cq:
err_deinit_q:
	iocpt_q_free(&new->q);
err_free_q:
	rte_free(new);
	return err;
}

int
iocpt_cryptoq_alloc(struct iocpt_dev *dev, uint32_t socket_id, uint32_t index,
		uint16_t num_descs)
{
	struct iocpt_crypto_q *cptq;
	uint16_t flags = 0;
	int err;

	/* CryptoQ always supports scatter-gather */
	flags |= IOCPT_Q_F_SG;

	IOCPT_PRINT(DEBUG, "cptq %u num_descs %u num_segs %u",
		index, num_descs, 1);

	err = iocpt_commonq_alloc(dev,
		IOCPT_QTYPE_CRYPTOQ,
		sizeof(struct iocpt_crypto_q),
		socket_id,
		index,
		"crypto",
		flags,
		num_descs,
		1,
		sizeof(struct iocpt_crypto_desc),
		sizeof(struct iocpt_crypto_comp),
		sizeof(struct iocpt_crypto_sg_desc),
		(struct iocpt_common_q **)&cptq);
	if (err != 0)
		return err;

	cptq->flags = flags;

	dev->cryptoqs[index] = cptq;

	return 0;
}

struct ionic_doorbell *
iocpt_db_map(struct iocpt_dev *dev, struct iocpt_queue *q)
{
	return dev->db_pages + q->hw_type;
}
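
/*
 * Flow summary for the function below: post IOCPT_CMD_Q_INIT on the admin
 * queue; the completion returns hw_type/hw_index, which select this
 * queue's doorbell page via iocpt_db_map() above.
 */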

static int
iocpt_cryptoq_init(struct iocpt_crypto_q *cptq)
{
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_dev *dev = cptq->dev;
	struct iocpt_cq *cq = &cptq->cq;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IOCPT_CMD_Q_INIT,
			.type = IOCPT_QTYPE_CRYPTOQ,
			.ver = dev->qtype_info[IOCPT_QTYPE_CRYPTOQ].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IOCPT_QINIT_F_ENA |
						IOCPT_QINIT_F_SG),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	IOCPT_PRINT(DEBUG, "cptq_init.index %d", q->index);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_base %#jx", q->base_pa);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IOCPT_PRINT(DEBUG, "cptq_init.ver %u", ctx.cmd.q_init.ver);

	iocpt_q_reset(q);
	iocpt_cq_reset(cq);

	err = iocpt_adminq_post_wait(dev, &ctx);
	if (err != 0)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = iocpt_db_map(dev, q);

	IOCPT_PRINT(DEBUG, "cptq->hw_type %d", q->hw_type);
	IOCPT_PRINT(DEBUG, "cptq->hw_index %d", q->hw_index);
	IOCPT_PRINT(DEBUG, "cptq->db %p", q->db);

	cptq->flags |= IOCPT_Q_F_INITED;

	return 0;
}

static void
iocpt_cryptoq_deinit(struct iocpt_crypto_q *cptq)
{
	struct iocpt_dev *dev = cptq->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_control = {
			.opcode = IOCPT_CMD_Q_CONTROL,
			.type = IOCPT_QTYPE_CRYPTOQ,
			.index = rte_cpu_to_le_32(cptq->q.index),
			.oper = IOCPT_Q_DISABLE,
		},
	};
	unsigned long sleep_usec = 100UL * 1000;
	uint32_t sleep_cnt, sleep_max = IOCPT_CRYPTOQ_WAIT;
	int err;

	for (sleep_cnt = 0; sleep_cnt < sleep_max; sleep_cnt++) {
		ctx.pending_work = true;

		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != -EAGAIN)
			break;

		rte_delay_us_block(sleep_usec);
	}

	if (err != 0)
		IOCPT_PRINT(ERR, "Deinit queue %u returned %d after %u ms",
			cptq->q.index, err, sleep_cnt * 100);
	else
		IOCPT_PRINT(DEBUG, "Deinit queue %u returned %d after %u ms",
			cptq->q.index, err, sleep_cnt * 100);

	IOCPT_PRINT(DEBUG, "Queue %u watchdog: enq %"PRIu64" deq %"PRIu64,
		cptq->q.index, cptq->enqueued_wdogs, cptq->dequeued_wdogs);

	cptq->flags &= ~IOCPT_Q_F_INITED;
}

void
iocpt_cryptoq_free(struct iocpt_crypto_q *cptq)
{
	if (cptq == NULL)
		return;

	if (cptq->base_z != NULL) {
		rte_memzone_free(cptq->base_z);
		cptq->base_z = NULL;
		cptq->base = NULL;
		cptq->base_pa = 0;
	}

	iocpt_q_free(&cptq->q);

	rte_free(cptq);
}

static int
iocpt_adminq_alloc(struct iocpt_dev *dev)
{
	struct iocpt_admin_q *aq;
	uint16_t num_descs = IOCPT_ADMINQ_LENGTH;
	uint16_t flags = 0;
	int err;

	err = iocpt_commonq_alloc(dev,
		IOCPT_QTYPE_ADMINQ,
		sizeof(struct iocpt_admin_q),
		rte_socket_id(),
		0,
		"admin",
		flags,
		num_descs,
		1,
		sizeof(struct iocpt_admin_cmd),
		sizeof(struct iocpt_admin_comp),
		0,
		(struct iocpt_common_q **)&aq);
	if (err != 0)
		return err;

	aq->flags = flags;

	dev->adminq = aq;

	return 0;
}

static int
iocpt_adminq_init(struct iocpt_dev *dev)
{
	return iocpt_dev_adminq_init(dev);
}

static void
iocpt_adminq_deinit(struct iocpt_dev *dev)
{
	dev->adminq->flags &= ~IOCPT_Q_F_INITED;
}

static void
iocpt_adminq_free(struct iocpt_admin_q *aq)
{
	if (aq->base_z != NULL) {
		rte_memzone_free(aq->base_z);
		aq->base_z = NULL;
		aq->base = NULL;
		aq->base_pa = 0;
	}

	iocpt_q_free(&aq->q);

	rte_free(aq);
}
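
/*
 * iocpt_alloc_objs() below acquires, in order: the crypto queue array,
 * the admin queue, the device info memzone, and the session bitmap.
 * On failure it unwinds the same resources in reverse order.
 */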

static int
iocpt_alloc_objs(struct iocpt_dev *dev)
{
	uint32_t bmsize, i;
	uint8_t *bm;
	int err;

	IOCPT_PRINT(DEBUG, "Crypto: %s", dev->name);

	dev->cryptoqs = rte_calloc_socket("iocpt",
		dev->max_qps, sizeof(*dev->cryptoqs),
		RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (dev->cryptoqs == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate crypto queues array");
		return -ENOMEM;
	}

	rte_spinlock_init(&dev->adminq_lock);
	rte_spinlock_init(&dev->adminq_service_lock);

	err = iocpt_adminq_alloc(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot allocate admin queue");
		err = -ENOMEM;
		goto err_free_cryptoqs;
	}

	dev->info_sz = RTE_ALIGN(sizeof(*dev->info), rte_mem_page_size());
	dev->info_z = iocpt_dma_zone_reserve("info", 0, dev->info_sz,
			IONIC_ALIGN, dev->socket_id);
	if (dev->info_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate dev info memory");
		err = -ENOMEM;
		goto err_free_adminq;
	}

	dev->info = dev->info_z->addr;
	dev->info_pa = dev->info_z->iova;

	bmsize = rte_bitmap_get_memory_footprint(dev->max_sessions);
	bm = rte_malloc_socket("iocpt", bmsize,
			RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate %uB bitmap memory", bmsize);
		err = -ENOMEM;
		goto err_free_dmazone;
	}

	dev->sess_bm = rte_bitmap_init(dev->max_sessions, bm, bmsize);
	if (dev->sess_bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot initialize bitmap");
		err = -EFAULT;
		goto err_free_bm;
	}
	for (i = 0; i < dev->max_sessions; i++)
		rte_bitmap_set(dev->sess_bm, i);

	return 0;

err_free_bm:
	rte_free(bm);
err_free_dmazone:
	rte_memzone_free(dev->info_z);
	dev->info_z = NULL;
	dev->info = NULL;
	dev->info_pa = 0;
err_free_adminq:
	iocpt_adminq_free(dev->adminq);
	dev->adminq = NULL;
err_free_cryptoqs:
	rte_free(dev->cryptoqs);
	dev->cryptoqs = NULL;
	return err;
}

static int
iocpt_init(struct iocpt_dev *dev)
{
	int err;

	memset(&dev->stats_base, 0, sizeof(dev->stats_base));

	/* Uses dev_cmds */
	err = iocpt_dev_init(dev, dev->info_pa);
	if (err != 0)
		return err;

	err = iocpt_adminq_init(dev);
	if (err != 0)
		return err;

	/* Write the queue watchdog key */
	err = iocpt_session_wdog(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot setup watchdog session");
		goto err_out_adminq_deinit;
	}

	dev->state |= IOCPT_DEV_F_INITED;

	return 0;

err_out_adminq_deinit:
	iocpt_adminq_deinit(dev);

	return err;
}

void
iocpt_configure(struct iocpt_dev *dev)
{
	RTE_SET_USED(dev);
}

int
iocpt_start(struct iocpt_dev *dev)
{
	uint32_t i;
	int err;

	IOCPT_PRINT(DEBUG, "Starting %u queues",
		dev->crypto_dev->data->nb_queue_pairs);

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		err = iocpt_cryptoq_init(dev->cryptoqs[i]);
		if (err != 0)
			return err;
	}

	dev->state |= IOCPT_DEV_F_UP;

	return 0;
}

void
iocpt_stop(struct iocpt_dev *dev)
{
	uint32_t i;

	IOCPT_PRINT_CALL();

	dev->state &= ~IOCPT_DEV_F_UP;

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		struct iocpt_crypto_q *cptq = dev->cryptoqs[i];

		if (cptq->flags & IOCPT_Q_F_INITED)
			iocpt_cryptoq_deinit(cptq);
	}
}
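
/*
 * Lifecycle note: iocpt_stop() only disables the hardware queues; the
 * queue memory stays allocated, so iocpt_start() can re-init them.
 * Full teardown is iocpt_deinit() plus iocpt_free_objs() at remove time.
 */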

void
iocpt_deinit(struct iocpt_dev *dev)
{
	IOCPT_PRINT_CALL();

	if (!(dev->state & IOCPT_DEV_F_INITED))
		return;

	iocpt_adminq_deinit(dev);

	dev->state &= ~IOCPT_DEV_F_INITED;
}

static void
iocpt_free_objs(struct iocpt_dev *dev)
{
	void **queue_pairs = dev->crypto_dev->data->queue_pairs;
	uint32_t i;

	IOCPT_PRINT_CALL();

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		iocpt_cryptoq_free(queue_pairs[i]);
		queue_pairs[i] = NULL;
	}

	if (dev->sess_bm != NULL) {
		rte_bitmap_free(dev->sess_bm);
		rte_free(dev->sess_bm);
		dev->sess_bm = NULL;
	}

	if (dev->adminq != NULL) {
		iocpt_adminq_free(dev->adminq);
		dev->adminq = NULL;
	}

	if (dev->cryptoqs != NULL) {
		rte_free(dev->cryptoqs);
		dev->cryptoqs = NULL;
	}

	if (dev->info != NULL) {
		rte_memzone_free(dev->info_z);
		dev->info_z = NULL;
		dev->info = NULL;
		dev->info_pa = 0;
	}
}

static int
iocpt_devargs(struct rte_devargs *devargs, struct iocpt_dev *dev)
{
	RTE_SET_USED(devargs);
	RTE_SET_USED(dev);

	return 0;
}
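
/*
 * Probe sequence sketch: create the cryptodev shell, validate and copy
 * the BARs, check the firmware signature, identify the device, allocate
 * driver objects, init the device, then assign the PMD ops.
 */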

int
iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
	struct iocpt_dev_bars *bars, const struct iocpt_dev_intf *intf,
	uint8_t driver_id, uint8_t socket_id)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"iocpt",
		sizeof(struct iocpt_dev),
		socket_id,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;
	uint32_t i, sig;
	int err;

	/* Check structs (trigger error at compilation time) */
	iocpt_struct_size_checks();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		IOCPT_PRINT(ERR, "Multi-process not supported");
		err = -EPERM;
		goto err;
	}

	cdev = rte_cryptodev_pmd_create(rte_dev->name, rte_dev, &init_params);
	if (cdev == NULL) {
		IOCPT_PRINT(ERR, "Out of memory");
		err = -ENOMEM;
		goto err;
	}

	dev = cdev->data->dev_private;
	dev->crypto_dev = cdev;
	dev->bus_dev = bus_dev;
	dev->intf = intf;
	dev->driver_id = driver_id;
	dev->socket_id = socket_id;

	for (i = 0; i < bars->num_bars; i++) {
		struct ionic_dev_bar *bar = &bars->bar[i];

		IOCPT_PRINT(DEBUG,
			"bar[%u] = { .va = %p, .pa = %#jx, .len = %lu }",
			i, bar->vaddr, bar->bus_addr, bar->len);
		if (bar->vaddr == NULL) {
			IOCPT_PRINT(ERR, "Null bar found, aborting");
			err = -EFAULT;
			goto err_destroy_crypto_dev;
		}

		dev->bars.bar[i].vaddr = bar->vaddr;
		dev->bars.bar[i].bus_addr = bar->bus_addr;
		dev->bars.bar[i].len = bar->len;
	}
	dev->bars.num_bars = bars->num_bars;

	err = iocpt_devargs(rte_dev->devargs, dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot parse device arguments");
		goto err_destroy_crypto_dev;
	}

	err = iocpt_setup_bars(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot setup BARs: %d, aborting", err);
		goto err_destroy_crypto_dev;
	}

	sig = ioread32(&dev->dev_info->signature);
	if (sig != IOCPT_DEV_INFO_SIGNATURE) {
		IOCPT_PRINT(ERR, "Incompatible firmware signature %#x", sig);
		err = -EFAULT;
		goto err_destroy_crypto_dev;
	}

	for (i = 0; i < IOCPT_FWVERS_BUFLEN; i++)
		dev->fw_version[i] = ioread8(&dev->dev_info->fw_version[i]);
	dev->fw_version[IOCPT_FWVERS_BUFLEN - 1] = '\0';
	IOCPT_PRINT(DEBUG, "%s firmware: %s", dev->name, dev->fw_version);

	err = iocpt_dev_identify(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot identify device: %d, aborting", err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_alloc_objs(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot alloc device objects: %d", err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_init(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot init device: %d, aborting", err);
		goto err_free_objs;
	}

	err = iocpt_assign_ops(cdev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Failed to configure ops");
		goto err_deinit_dev;
	}

	return 0;

err_deinit_dev:
	iocpt_deinit(dev);
err_free_objs:
	iocpt_free_objs(dev);
err_destroy_crypto_dev:
	rte_cryptodev_pmd_destroy(cdev);
err:
	return err;
}

int
iocpt_remove(struct rte_device *rte_dev)
{
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;

	cdev = rte_cryptodev_pmd_get_named_dev(rte_dev->name);
	if (cdev == NULL) {
		IOCPT_PRINT(DEBUG, "Cannot find device %s", rte_dev->name);
		return -ENODEV;
	}

	dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	iocpt_dev_reset(dev);

	iocpt_free_objs(dev);

	rte_cryptodev_pmd_destroy(cdev);

	return 0;
}

RTE_LOG_REGISTER_DEFAULT(iocpt_logtype, NOTICE);