/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_bitops.h>

#include "ionic_crypto.h"

static int
iocpt_cq_init(struct iocpt_cq *cq, uint16_t num_descs)
{
	if (!rte_is_power_of_2(num_descs) ||
	    num_descs < IOCPT_MIN_RING_DESC ||
	    num_descs > IOCPT_MAX_RING_DESC) {
		IOCPT_PRINT(ERR, "%u descriptors (min: %u max: %u)",
			num_descs, IOCPT_MIN_RING_DESC, IOCPT_MAX_RING_DESC);
		return -EINVAL;
	}

	cq->num_descs = num_descs;
	cq->size_mask = num_descs - 1;
	cq->tail_idx = 0;
	cq->done_color = 1;

	return 0;
}

static void
iocpt_cq_map(struct iocpt_cq *cq, void *base, rte_iova_t base_pa)
{
	cq->base = base;
	cq->base_pa = base_pa;
}

uint32_t
iocpt_cq_service(struct iocpt_cq *cq, uint32_t work_to_do,
		iocpt_cq_cb cb, void *cb_arg)
{
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return 0;

	while (cb(cq, cq->tail_idx, cb_arg)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		if (++work_done == work_to_do)
			break;
	}

	return work_done;
}

static int
iocpt_q_init(struct iocpt_queue *q, uint8_t type, uint32_t index,
	uint16_t num_descs, uint16_t num_segs, uint32_t socket_id)
{
	uint32_t ring_size;

	if (!rte_is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = rte_log2_u32(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->type = type;
	q->index = index;
	q->num_descs = num_descs;
	q->num_segs = num_segs;
	q->size_mask = num_descs - 1;
	q->head_idx = 0;
	q->tail_idx = 0;

	q->info = rte_calloc_socket("iocpt",
		num_descs * num_segs, sizeof(void *),
		rte_mem_page_size(), socket_id);
	if (q->info == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue info");
		return -ENOMEM;
	}

	return 0;
}

static void
iocpt_q_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->base = base;
	q->base_pa = base_pa;
}

static void
iocpt_q_sg_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->sg_base = base;
	q->sg_base_pa = base_pa;
}

static void
iocpt_q_free(struct iocpt_queue *q)
{
	if (q->info != NULL) {
		rte_free(q->info);
		q->info = NULL;
	}
}

static int
iocpt_session_write(struct iocpt_session_priv *priv,
		enum iocpt_sess_control_oper oper)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = oper,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
			.key_seg_len = (uint8_t)RTE_MIN(priv->key_len,
						IOCPT_SESS_KEY_SEG_LEN),
		},
	};
	struct iocpt_sess_control_cmd *cmd = &ctx.cmd.sess_control;
	uint16_t key_offset;
	uint8_t key_segs, seg;
	int err;

	key_segs = ((priv->key_len - 1) >> IOCPT_SESS_KEY_SEG_SHFT) + 1;

	for (seg = 0; seg < key_segs; seg++) {
		ctx.pending_work = true;

		key_offset = seg * cmd->key_seg_len;
		memcpy(cmd->key, &priv->key[key_offset],
			IOCPT_SESS_KEY_SEG_LEN);
		cmd->key_seg_idx = seg;

		/* Mark final segment */
		if (seg + 1 == key_segs)
			cmd->flags |= rte_cpu_to_le_16(IOCPT_SCTL_F_END);

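		/* Post this segment and wait for the admin completion */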
		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != 0)
			return err;
	}

	return 0;
}

int
iocpt_session_init(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	uint64_t bm_slab = 0;
	uint32_t bm_pos = 0;
	int err = 0;

	rte_spinlock_lock(&dev->adminq_lock);

	if (rte_bitmap_scan(dev->sess_bm, &bm_pos, &bm_slab) > 0) {
		priv->index = bm_pos + rte_ctz64(bm_slab);
		rte_bitmap_clear(dev->sess_bm, priv->index);
	} else
		err = -ENOSPC;

	rte_spinlock_unlock(&dev->adminq_lock);

	if (err != 0) {
		IOCPT_PRINT(ERR, "Session index space exhausted");
		return err;
	}

	err = iocpt_session_write(priv, IOCPT_SESS_INIT);
	if (err != 0) {
		rte_spinlock_lock(&dev->adminq_lock);
		rte_bitmap_set(dev->sess_bm, priv->index);
		rte_spinlock_unlock(&dev->adminq_lock);
		return err;
	}

	priv->flags |= IOCPT_S_F_INITED;

	return 0;
}

int
iocpt_session_update(struct iocpt_session_priv *priv)
{
	return iocpt_session_write(priv, IOCPT_SESS_UPDATE_KEY);
}

void
iocpt_session_deinit(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = IOCPT_SESS_DISABLE,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
		},
	};

	(void)iocpt_adminq_post_wait(dev, &ctx);

	rte_spinlock_lock(&dev->adminq_lock);
	rte_bitmap_set(dev->sess_bm, priv->index);
	rte_spinlock_unlock(&dev->adminq_lock);

	priv->flags &= ~IOCPT_S_F_INITED;
}

static const struct rte_memzone *
iocpt_dma_zone_reserve(const char *type_name, uint16_t qid, size_t size,
			unsigned int align, int socket_id)
{
	char zone_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err;

	err = snprintf(zone_name, sizeof(zone_name),
		"iocpt_%s_%u", type_name, qid);
	if (err >= RTE_MEMZONE_NAMESIZE) {
		IOCPT_PRINT(ERR, "Name %s too long", type_name);
		return NULL;
	}

	mz = rte_memzone_lookup(zone_name);
	if (mz != NULL)
		return mz;

	return rte_memzone_reserve_aligned(zone_name, size, socket_id,
		RTE_MEMZONE_IOVA_CONTIG, align);
}

static int
iocpt_commonq_alloc(struct iocpt_dev *dev,
		uint8_t type,
		size_t struct_size,
		uint32_t socket_id,
		uint32_t index,
		const char *type_name,
		uint16_t flags,
		uint16_t num_descs,
		uint16_t num_segs,
		uint16_t desc_size,
		uint16_t cq_desc_size,
		uint16_t sg_desc_size,
		struct iocpt_common_q **comq)
{
	struct iocpt_common_q *new;
	uint32_t q_size, cq_size, sg_size, total_size;
	void *q_base, *cq_base, *sg_base;
	rte_iova_t q_base_pa = 0;
	rte_iova_t cq_base_pa = 0;
	rte_iova_t sg_base_pa = 0;
	size_t page_size = rte_mem_page_size();
	int err;

	*comq = NULL;

	q_size = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	/*
	 * Note: aligning q_size/cq_size is not enough, since cq_base must
	 * also be page-aligned and q_base might not be aligned to the page.
	 * Adding page_size.
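	 * Layout: descriptors, pad to page, completions,
	 * then (if IOCPT_Q_F_SG) pad to page and SG descriptors.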
	 */
	total_size = RTE_ALIGN(q_size, page_size) +
		RTE_ALIGN(cq_size, page_size) + page_size;
	if (flags & IOCPT_Q_F_SG)
		total_size += RTE_ALIGN(sg_size, page_size) + page_size;

	new = rte_zmalloc_socket("iocpt", struct_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (new == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	new->dev = dev;

	err = iocpt_q_init(&new->q, type, index, num_descs, num_segs,
		socket_id);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Queue initialization failed");
		goto err_free_q;
	}

	err = iocpt_cq_init(&new->cq, num_descs);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Completion queue initialization failed");
		goto err_deinit_q;
	}

	new->base_z = iocpt_dma_zone_reserve(type_name, index, total_size,
		IONIC_ALIGN, socket_id);
	if (new->base_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot reserve queue DMA memory");
		err = -ENOMEM;
		goto err_deinit_cq;
	}

	new->base = new->base_z->addr;
	new->base_pa = new->base_z->iova;

	q_base = new->base;
	q_base_pa = new->base_pa;
	iocpt_q_map(&new->q, q_base, q_base_pa);

	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
	iocpt_cq_map(&new->cq, cq_base, cq_base_pa);

	if (flags & IOCPT_Q_F_SG) {
		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
			page_size);
		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
		iocpt_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	IOCPT_PRINT(DEBUG, "q_base_pa %#jx cq_base_pa %#jx sg_base_pa %#jx",
		q_base_pa, cq_base_pa, sg_base_pa);

	*comq = new;

	return 0;

err_deinit_cq:
err_deinit_q:
	iocpt_q_free(&new->q);
err_free_q:
	rte_free(new);
	return err;
}

struct ionic_doorbell *
iocpt_db_map(struct iocpt_dev *dev, struct iocpt_queue *q)
{
	return dev->db_pages + q->hw_type;
}

static int
iocpt_adminq_alloc(struct iocpt_dev *dev)
{
	struct iocpt_admin_q *aq;
	uint16_t num_descs = IOCPT_ADMINQ_LENGTH;
	uint16_t flags = 0;
	int err;

	err = iocpt_commonq_alloc(dev,
		IOCPT_QTYPE_ADMINQ,
		sizeof(struct iocpt_admin_q),
		rte_socket_id(),
		0,
		"admin",
		flags,
		num_descs,
		1,
		sizeof(struct iocpt_admin_cmd),
		sizeof(struct iocpt_admin_comp),
		0,
		(struct iocpt_common_q **)&aq);
	if (err != 0)
		return err;

	aq->flags = flags;

	dev->adminq = aq;

	return 0;
}

static int
iocpt_adminq_init(struct iocpt_dev *dev)
{
	return iocpt_dev_adminq_init(dev);
}

static void
iocpt_adminq_deinit(struct iocpt_dev *dev)
{
	dev->adminq->flags &= ~IOCPT_Q_F_INITED;
}

static void
iocpt_adminq_free(struct iocpt_admin_q *aq)
{
	if (aq->base_z != NULL) {
		rte_memzone_free(aq->base_z);
		aq->base_z = NULL;
		aq->base = NULL;
		aq->base_pa = 0;
	}

	iocpt_q_free(&aq->q);

	rte_free(aq);
}

static int
iocpt_alloc_objs(struct iocpt_dev *dev)
{
	uint32_t bmsize, i;
	uint8_t *bm;
	int err;

	IOCPT_PRINT(DEBUG, "Crypto: %s", dev->name);

	rte_spinlock_init(&dev->adminq_lock);
	rte_spinlock_init(&dev->adminq_service_lock);

	err = iocpt_adminq_alloc(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot allocate admin queue");
		err = -ENOMEM;
		goto err_out;
	}

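	/* Reserve an IOVA-contiguous zone for the device info block */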
	dev->info_sz = RTE_ALIGN(sizeof(*dev->info), rte_mem_page_size());
	dev->info_z = iocpt_dma_zone_reserve("info", 0, dev->info_sz,
		IONIC_ALIGN, dev->socket_id);
	if (dev->info_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate dev info memory");
		err = -ENOMEM;
		goto err_free_adminq;
	}

	dev->info = dev->info_z->addr;
	dev->info_pa = dev->info_z->iova;

	bmsize = rte_bitmap_get_memory_footprint(dev->max_sessions);
	bm = rte_malloc_socket("iocpt", bmsize,
			RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate %uB bitmap memory", bmsize);
		err = -ENOMEM;
		goto err_free_dmazone;
	}

	dev->sess_bm = rte_bitmap_init(dev->max_sessions, bm, bmsize);
	if (dev->sess_bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot initialize bitmap");
		err = -EFAULT;
		goto err_free_bm;
	}
	for (i = 0; i < dev->max_sessions; i++)
		rte_bitmap_set(dev->sess_bm, i);

	return 0;

err_free_bm:
	rte_free(bm);
err_free_dmazone:
	rte_memzone_free(dev->info_z);
	dev->info_z = NULL;
	dev->info = NULL;
	dev->info_pa = 0;
err_free_adminq:
	iocpt_adminq_free(dev->adminq);
	dev->adminq = NULL;
err_out:
	return err;
}

static int
iocpt_init(struct iocpt_dev *dev)
{
	int err;

	/* Uses dev_cmds */
	err = iocpt_dev_init(dev, dev->info_pa);
	if (err != 0)
		return err;

	err = iocpt_adminq_init(dev);
	if (err != 0)
		return err;

	dev->state |= IOCPT_DEV_F_INITED;

	return 0;
}

void
iocpt_configure(struct iocpt_dev *dev)
{
	RTE_SET_USED(dev);
}

void
iocpt_deinit(struct iocpt_dev *dev)
{
	IOCPT_PRINT_CALL();

	if (!(dev->state & IOCPT_DEV_F_INITED))
		return;

	iocpt_adminq_deinit(dev);

	dev->state &= ~IOCPT_DEV_F_INITED;
}

static void
iocpt_free_objs(struct iocpt_dev *dev)
{
	IOCPT_PRINT_CALL();

	if (dev->sess_bm != NULL) {
		rte_bitmap_free(dev->sess_bm);
		rte_free(dev->sess_bm);
		dev->sess_bm = NULL;
	}

	if (dev->adminq != NULL) {
		iocpt_adminq_free(dev->adminq);
		dev->adminq = NULL;
	}

	if (dev->info != NULL) {
		rte_memzone_free(dev->info_z);
		dev->info_z = NULL;
		dev->info = NULL;
		dev->info_pa = 0;
	}
}

static int
iocpt_devargs(struct rte_devargs *devargs, struct iocpt_dev *dev)
{
	RTE_SET_USED(devargs);
	RTE_SET_USED(dev);

	return 0;
}

int
iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
	struct iocpt_dev_bars *bars, const struct iocpt_dev_intf *intf,
	uint8_t driver_id, uint8_t socket_id)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"iocpt",
		sizeof(struct iocpt_dev),
		socket_id,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;
	uint32_t i, sig;
	int err;

	/* Check structs (trigger error at compilation time) */
	iocpt_struct_size_checks();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		IOCPT_PRINT(ERR, "Multi-process not supported");
		err = -EPERM;
		goto err;
	}

	cdev = rte_cryptodev_pmd_create(rte_dev->name, rte_dev, &init_params);
	if (cdev == NULL) {
		IOCPT_PRINT(ERR, "Out of memory");
		err = -ENOMEM;
		goto err;
	}

	dev = cdev->data->dev_private;
	dev->crypto_dev = cdev;
	dev->bus_dev = bus_dev;
	dev->intf = intf;
	dev->driver_id = driver_id;
	dev->socket_id = socket_id;

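	/* Copy and validate the BARs provided by the bus layer */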
	for (i = 0; i < bars->num_bars; i++) {
		struct ionic_dev_bar *bar = &bars->bar[i];

		IOCPT_PRINT(DEBUG,
			"bar[%u] = { .va = %p, .pa = %#jx, .len = %lu }",
			i, bar->vaddr, bar->bus_addr, bar->len);
		if (bar->vaddr == NULL) {
			IOCPT_PRINT(ERR, "Null bar found, aborting");
			err = -EFAULT;
			goto err_destroy_crypto_dev;
		}

		dev->bars.bar[i].vaddr = bar->vaddr;
		dev->bars.bar[i].bus_addr = bar->bus_addr;
		dev->bars.bar[i].len = bar->len;
	}
	dev->bars.num_bars = bars->num_bars;

	err = iocpt_devargs(rte_dev->devargs, dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot parse device arguments");
		goto err_destroy_crypto_dev;
	}

	err = iocpt_setup_bars(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot setup BARs: %d, aborting", err);
		goto err_destroy_crypto_dev;
	}

	sig = ioread32(&dev->dev_info->signature);
	if (sig != IOCPT_DEV_INFO_SIGNATURE) {
		IOCPT_PRINT(ERR, "Incompatible firmware signature %#x", sig);
		err = -EFAULT;
		goto err_destroy_crypto_dev;
	}

	for (i = 0; i < IOCPT_FWVERS_BUFLEN; i++)
		dev->fw_version[i] = ioread8(&dev->dev_info->fw_version[i]);
	dev->fw_version[IOCPT_FWVERS_BUFLEN - 1] = '\0';
	IOCPT_PRINT(DEBUG, "%s firmware: %s", dev->name, dev->fw_version);

	err = iocpt_dev_identify(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot identify device: %d, aborting",
			err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_alloc_objs(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot alloc device objects: %d", err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_init(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot init device: %d, aborting", err);
		goto err_free_objs;
	}

	err = iocpt_assign_ops(cdev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Failed to configure ops");
		goto err_deinit_dev;
	}

	return 0;

err_deinit_dev:
	iocpt_deinit(dev);
err_free_objs:
	iocpt_free_objs(dev);
err_destroy_crypto_dev:
	rte_cryptodev_pmd_destroy(cdev);
err:
	return err;
}

int
iocpt_remove(struct rte_device *rte_dev)
{
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;

	cdev = rte_cryptodev_pmd_get_named_dev(rte_dev->name);
	if (cdev == NULL) {
		IOCPT_PRINT(DEBUG, "Cannot find device %s", rte_dev->name);
		return -ENODEV;
	}

	dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	iocpt_dev_reset(dev);

	iocpt_free_objs(dev);

	rte_cryptodev_pmd_destroy(cdev);

	return 0;
}

RTE_LOG_REGISTER_DEFAULT(iocpt_logtype, NOTICE);