/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_eal.h>

#include "virtio_cryptodev.h"
#include "virtqueue.h"

int virtio_crypto_logtype_init;
int virtio_crypto_logtype_session;
int virtio_crypto_logtype_rx;
int virtio_crypto_logtype_tx;
int virtio_crypto_logtype_driver;

static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config);
static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);
static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
		uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id,
		struct rte_mempool *session_pool);
static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
		uint16_t queue_pair_id);
static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
			VIRTIO_CRYPTO_PCI_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

uint8_t cryptodev_virtio_driver_id;

void
virtio_crypto_queue_release(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (vq) {
		hw = vq->hw;
		/* Select and deactivate the queue */
		VTPCI_OPS(hw)->del_queue(hw, vq);

		rte_memzone_free(vq->mz);
		rte_mempool_free(vq->mpool);
		rte_free(vq);
	}
}

#define MPOOL_MAX_NAME_SZ 32

int
virtio_crypto_queue_setup(struct rte_cryptodev *dev,
		int queue_type,
		uint16_t vtpci_queue_idx,
		uint16_t nb_desc,
		int socket_id,
		struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char mpool_name[MPOOL_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	unsigned int vq_size, size;
	struct virtio_crypto_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = NULL;
	uint32_t i = 0;
	uint32_t j;

	PMD_INIT_FUNC_TRACE();

	VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if it is 0, the virtqueue
	 * does not exist.
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	if (vq_size == 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
		return -EINVAL;
	}
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);

	if (!rte_is_power_of_2(vq_size)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
		return -EINVAL;
	}

	if (queue_type == VTCRYPTO_DATAQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
				dev->data->dev_id, vtpci_queue_idx);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_dataqueue%d_mpool",
				dev->data->dev_id, vtpci_queue_idx);
	} else if (queue_type == VTCRYPTO_CTRLQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
				dev->data->dev_id);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_controlqueue_mpool",
				dev->data->dev_id);
	}
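	/*
	 * Illustrative sketch (not part of the driver logic): the
	 * allocation sized below keeps the virtqueue header and the
	 * per-descriptor extra data in a single cache-line-aligned
	 * block. This assumes struct virtqueue ends with a flexible
	 * vq_descx[] array, as declared in virtqueue.h:
	 *
	 *   +-------------------+----------------------------------+
	 *   | struct virtqueue  | struct vq_desc_extra[vq_size]    |
	 *   +-------------------+----------------------------------+
	 *   vq                   vq->vq_descx[0] .. [vq_size - 1]
	 */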
	size = RTE_ALIGN_CEIL(sizeof(*vq) +
			vq_size * sizeof(struct vq_desc_extra),
			RTE_CACHE_LINE_SIZE);
	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
			socket_id);
	if (vq == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("Cannot allocate virtqueue");
		return -ENOMEM;
	}

	if (queue_type == VTCRYPTO_DATAQ) {
		/* pre-allocate a mempool and use it in the data plane to
		 * improve performance
		 */
		vq->mpool = rte_mempool_lookup(mpool_name);
		if (vq->mpool == NULL)
			vq->mpool = rte_mempool_create(mpool_name,
					vq_size,
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE, 0,
					NULL, NULL, NULL, NULL, socket_id,
					0);
		if (!vq->mpool) {
			VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
					"cannot create mempool");
			goto mpool_create_err;
		}
		for (i = 0; i < vq_size; i++) {
			vq->vq_descx[i].cookie =
				rte_zmalloc("crypto PMD op cookie pointer",
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE);
			if (vq->vq_descx[i].cookie == NULL) {
				VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
						"alloc memory for cookie");
				goto cookie_alloc_err;
			}
		}
	}

	vq->hw = hw;
	vq->dev_id = dev->data->dev_id;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Using part of the vring entries is permitted, but the maximum
	 * is vq_size
	 */
	if (nb_desc == 0 || nb_desc > vq_size)
		nb_desc = vq_size;
	vq->vq_free_cnt = nb_desc;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
			(queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
			size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
			goto mz_reserve_err;
		}
	}

	/*
	 * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bits
	 * wide and only accepts a 32-bit page frame number.
	 * Check whether the allocated physical memory exceeds 16TB.
	 */
	if ((mz->phys_addr + vq->vq_ring_size - 1)
			>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
				"above 16TB!");
		goto vring_addr_err;
	}
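	/*
	 * Worked example for the check above (an illustration; it assumes
	 * the legacy VIRTIO_PCI_QUEUE_ADDR_SHIFT value of 12): the device
	 * stores a 32-bit page frame number, so the highest addressable
	 * byte is (2^32 - 1) << 12. The ring must therefore end below
	 * 2^(12 + 32) = 2^44 bytes = 16 TiB; any end address with a bit
	 * set above bit 43 fails the check.
	 */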
	memset(mz->addr, 0, mz->len);
	vq->mz = mz;
	vq->vq_ring_mem = mz->phys_addr;
	vq->vq_ring_virt_mem = mz->addr;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
			(uint64_t)mz->phys_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
			(uint64_t)(uintptr_t)mz->addr);

	*pvq = vq;

	return 0;

vring_addr_err:
	rte_memzone_free(mz);
mz_reserve_err:
cookie_alloc_err:
	rte_mempool_free(vq->mpool);
	if (i != 0) {
		for (j = 0; j < i; j++)
			rte_free(vq->vq_descx[j].cookie);
	}
mpool_create_err:
	rte_free(vq);
	return -ENOMEM;
}

static int
virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
{
	int ret;
	struct virtqueue *vq;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	/* if the virtio device has started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
			0, SOCKET_ID_ANY, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
		return ret;
	}

	hw->cvq = vq;

	return 0;
}

static void
virtio_crypto_free_queues(struct rte_cryptodev *dev)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* control queue release */
	virtio_crypto_queue_release(hw->cvq);

	/* data queue release */
	for (i = 0; i < hw->max_dataqueues; i++)
		virtio_crypto_queue_release(dev->data->queue_pairs[i]);
}

static int
virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
	/* Device related operations */
	.dev_configure = virtio_crypto_dev_configure,
	.dev_start = virtio_crypto_dev_start,
	.dev_stop = virtio_crypto_dev_stop,
	.dev_close = virtio_crypto_dev_close,
	.dev_infos_get = virtio_crypto_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,

	.queue_pair_setup = virtio_crypto_qp_setup,
	.queue_pair_release = virtio_crypto_qp_release,
	.queue_pair_start = NULL,
	.queue_pair_stop = NULL,
	.queue_pair_count = NULL,

	/* Crypto related operations */
	.session_get_size = NULL,
	.session_configure = NULL,
	.session_clear = NULL,
	.qp_attach_session = NULL,
	.qp_detach_session = NULL
};

static int
virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id,
		struct rte_mempool *session_pool __rte_unused)
{
	int ret;
	struct virtqueue *vq;

	PMD_INIT_FUNC_TRACE();

	/* if the virtio device has started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
			qp_conf->nb_descriptors, socket_id, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"virtio crypto data queue initialization failed");
		return ret;
	}

	dev->data->queue_pairs[queue_pair_id] = vq;

	return 0;
}
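/*
 * A minimal application-side sketch (not part of the driver) of how the
 * dev_ops above are reached through the cryptodev API. dev_id, NB_QPS,
 * NB_DESC and session_pool are hypothetical values/objects chosen for
 * illustration:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = NB_QPS,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = NB_DESC,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)    // -> dev_configure
 *		rte_exit(EXIT_FAILURE, "configure failed\n");
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool) < 0) // -> qp_setup
 *		rte_exit(EXIT_FAILURE, "qp setup failed\n");
 *	rte_cryptodev_start(dev_id);                        // -> dev_start
 */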
static int
virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct virtqueue *vq
		= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (vq == NULL) {
		VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
		return 0;
	}

	virtio_crypto_queue_release(vq);
	return 0;
}

static int
virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	PMD_INIT_FUNC_TRACE();

	/* Prepare guest_features: features that the driver wants to support */
	VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
			req_features);

	/* Read device (host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
			host_features);

	/*
	 * Negotiate features: the subset of device feature bits that is
	 * accepted is written back as the guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
			host_features);
	VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
			hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_cryptodev_set_status(hw,
				VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_cryptodev_get_status(hw) &
				VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
					"status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}
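/*
 * A sketch of the negotiation semantics above (assuming
 * vtpci_cryptodev_negotiate_features() follows the standard virtio rule
 * that the driver may only accept feature bits the device offered):
 *
 *	negotiated = req_features & host_features;
 *
 * e.g. if the device offers VIRTIO_F_VERSION_1 (bit 32) and the driver
 * requested it, the bit survives the AND and the modern FEATURES_OK
 * handshake above is taken.
 */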
/* reset device and renegotiate features if needed */
static int
virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
		uint64_t req_features)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
	struct virtio_crypto_config local_config;
	struct virtio_crypto_config *config = &local_config;

	PMD_INIT_FUNC_TRACE();

	/* Reset the device, although not strictly necessary at startup */
	vtpci_cryptodev_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	/* Get the status of the device */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, status),
		&config->status, sizeof(config->status));
	if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
		VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
				"not ready");
		return -1;
	}

	/* Get the number of data queues */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, max_dataqueues),
		&config->max_dataqueues,
		sizeof(config->max_dataqueues));
	hw->max_dataqueues = config->max_dataqueues;

	VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
			hw->max_dataqueues);

	return 0;
}

/*
 * This function is based on the probe() function.
 * It returns 0 on success.
 */
static int
crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *cryptodev;
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
			init_params);
	if (cryptodev == NULL)
		return -ENODEV;

	cryptodev->driver_id = cryptodev_virtio_driver_id;
	cryptodev->dev_ops = &virtio_crypto_dev_ops;

	cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
	cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;

	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	hw = cryptodev->data->dev_private;
	hw->dev_id = cryptodev->data->dev_id;

	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
		cryptodev->data->dev_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	/* pci device init */
	if (vtpci_cryptodev_init(pci_dev, hw))
		return -1;

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	return 0;
}

static int
virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	if (cryptodev->data->dev_started) {
		virtio_crypto_dev_stop(cryptodev);
		virtio_crypto_dev_close(cryptodev);
	}

	cryptodev->dev_ops = NULL;
	cryptodev->enqueue_burst = NULL;
	cryptodev->dequeue_burst = NULL;

	/* release control queue */
	virtio_crypto_queue_release(hw->cvq);

	rte_free(cryptodev->data);
	cryptodev->data = NULL;

	VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");

	return 0;
}

static int
virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
		struct rte_cryptodev_config *config __rte_unused)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	/* Set up the control queue: indexes
	 * [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
	 * and index config->max_dataqueues is the control queue.
	 */
	if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
		return -1;
	}
	virtio_crypto_ctrlq_start(cryptodev);

	return 0;
}

static void
virtio_crypto_dev_stop(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");

	vtpci_cryptodev_reset(hw);

	virtio_crypto_dev_free_mbufs(dev);
	virtio_crypto_free_queues(dev);

	dev->data->dev_started = 0;
}

static int
virtio_crypto_dev_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (dev->data->dev_started)
		return 0;

	/* Do final configuration before queue engine starts */
	virtio_crypto_dataq_start(dev);
	vtpci_cryptodev_reinit_complete(hw);

	dev->data->dev_started = 1;

	return 0;
}
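/*
 * Overview of the virtio status handshake as driven by this file (the
 * DRIVER_OK step is an assumption: vtpci_cryptodev_reinit_complete() is
 * expected to set it, mirroring the virtio net PMD):
 *
 *	RESET                        vtpci_cryptodev_reset()
 *	ACKNOWLEDGE -> DRIVER        virtio_crypto_init_device()
 *	feature negotiation          virtio_negotiate_features()
 *	FEATURES_OK (modern only)    virtio_negotiate_features()
 *	DRIVER_OK                    virtio_crypto_dev_start()
 */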
static void
virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
{
	uint32_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	for (i = 0; i < hw->max_dataqueues; i++) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
				"and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)
				dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
				i, dev->data->queue_pairs[i]);

		virtqueue_detatch_unused(dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
				"unused buf", i);
		VIRTQUEUE_DUMP(
				(struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

static void
virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->driver_id = cryptodev_virtio_driver_id;
		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
		info->feature_flags = dev->feature_flags;
		info->max_nb_queue_pairs = hw->max_dataqueues;
		info->sym.max_nb_sessions =
			RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
	}
}

static int
crypto_virtio_pci_probe(
	struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = rte_socket_id(),
		.private_data_size = sizeof(struct virtio_crypto_hw),
		.max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
	};
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return crypto_virtio_create(name, pci_dev, &init_params);
}

static int
crypto_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, cryptodev_name,
			sizeof(cryptodev_name));

	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
	if (cryptodev == NULL)
		return -ENODEV;

	return virtio_crypto_dev_uninit(cryptodev);
}
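/*
 * Probe/remove above are invoked through the PCI bus driver below:
 * during the EAL bus probe, each scanned device is matched against
 * pci_id_virtio_crypto_map. For the modern virtio-crypto device the
 * table is expected to hold vendor 0x1af4 with device ID 0x1054
 * (0x1040 + the virtio-crypto device number 20); the exact values are
 * defined by VIRTIO_CRYPTO_PCI_VENDORID/VIRTIO_CRYPTO_PCI_DEVICEID in
 * virtio_cryptodev.h, and the numbers quoted here are assumptions.
 */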
static struct rte_pci_driver rte_virtio_crypto_driver = {
	.id_table = pci_id_virtio_crypto_map,
	.drv_flags = 0,
	.probe = crypto_virtio_pci_probe,
	.remove = crypto_virtio_pci_remove
};

static struct cryptodev_driver virtio_crypto_drv;

RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
	rte_virtio_crypto_driver.driver,
	cryptodev_virtio_driver_id);

RTE_INIT(virtio_crypto_init_log);
static void
virtio_crypto_init_log(void)
{
	virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
	if (virtio_crypto_logtype_init >= 0)
		rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);

	virtio_crypto_logtype_session =
		rte_log_register("pmd.crypto.virtio.session");
	if (virtio_crypto_logtype_session >= 0)
		rte_log_set_level(virtio_crypto_logtype_session,
				RTE_LOG_NOTICE);

	virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
	if (virtio_crypto_logtype_rx >= 0)
		rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);

	virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
	if (virtio_crypto_logtype_tx >= 0)
		rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);

	virtio_crypto_logtype_driver =
		rte_log_register("pmd.crypto.virtio.driver");
	if (virtio_crypto_logtype_driver >= 0)
		rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
}
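/*
 * Usage note (illustrative): the log types registered above default to
 * NOTICE. To get debug output, a test or application can raise a level
 * at run time, e.g.:
 *
 *	rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_DEBUG);
 *
 * or pass an EAL --log-level option selecting the "pmd.crypto.virtio.*"
 * log types (the exact option syntax depends on the DPDK version).
 */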