/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <stdbool.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_eal.h>

#include "virtio_cryptodev.h"
#include "virtqueue.h"
#include "virtio_crypto_algs.h"
#include "virtio_crypto_capabilities.h"

static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config);
static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);
static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);
static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
		uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id);
static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
		uint16_t queue_pair_id);
static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
static unsigned int virtio_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev);
static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess);
static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *session);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
				VIRTIO_CRYPTO_PCI_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
	VIRTIO_SYM_CAPABILITIES,
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

uint8_t cryptodev_virtio_driver_id;

#define NUM_ENTRY_SYM_CREATE_SESSION 4

static int
virtio_crypto_send_command(struct virtqueue *vq,
		struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
		uint8_t *auth_key, struct virtio_crypto_session *session)
{
	uint8_t idx = 0;
	uint8_t needed = 1;
	uint32_t head = 0;
	uint32_t len_cipher_key = 0;
	uint32_t len_auth_key = 0;
	uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
	uint32_t len_total = 0;
	uint32_t input_offset = 0;
	void *virt_addr_started = NULL;
	phys_addr_t phys_addr_started;
	struct vring_desc *desc;
	uint32_t desc_offset;
	struct virtio_crypto_session_input *input;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (session == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
		return -EINVAL;
	}
	/*
	 * Cipher-only sessions are supported, so auth_key may be NULL;
	 * cipher_key, however, is always required.
	 */
	if (!cipher_key) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
		return -EINVAL;
	}

	head = vq->vq_desc_head_idx;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
			head, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free descriptors");
		return -ENOSPC;
	}

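	/*
	 * The whole request is marshalled into one physically contiguous
	 * allocation and handed to the device through a single indirect
	 * descriptor:
	 *
	 *   [ctrl req][cipher key][auth key][session input][4 x vring_desc]
	 *
	 * The first three regions are device-readable; the session input
	 * is device-writable and carries back the status and session id.
	 */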
	/* calculate the length of cipher key */
	if (cipher_key) {
		switch (ctrl->u.sym_create_session.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			len_cipher_key
				= ctrl->u.sym_create_session.u.cipher
							.para.keylen;
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			len_cipher_key
				= ctrl->u.sym_create_session.u.chain
						.para.cipher_param.keylen;
			break;
		default:
			VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
			return -EINVAL;
		}
	}

	/* calculate the length of auth key */
	if (auth_key) {
		len_auth_key =
			ctrl->u.sym_create_session.u.chain.para.u.mac_param
				.auth_key_len;
	}

	/*
	 * malloc memory to store indirect vring_desc entries, including
	 * ctrl request, cipher key, auth key, session input and desc vring
	 */
	desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
		+ len_session_input;
	virt_addr_started = rte_malloc(NULL,
		desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
			* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (virt_addr_started == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
		return -ENOSPC;
	}
	phys_addr_started = rte_malloc_virt2iova(virt_addr_started);

	/* address to store indirect vring desc entries */
	desc = (struct vring_desc *)
		((uint8_t *)virt_addr_started + desc_offset);

	/* ctrl req part */
	memcpy(virt_addr_started, ctrl, len_ctrl_req);
	desc[idx].addr = phys_addr_started;
	desc[idx].len = len_ctrl_req;
	desc[idx].flags = VRING_DESC_F_NEXT;
	desc[idx].next = idx + 1;
	idx++;
	len_total += len_ctrl_req;
	input_offset += len_ctrl_req;

	/* cipher key part */
	if (len_cipher_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			cipher_key, len_cipher_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_cipher_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_cipher_key;
		input_offset += len_cipher_key;
	}

	/* auth key part */
	if (len_auth_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			auth_key, len_auth_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_auth_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_auth_key;
		input_offset += len_auth_key;
	}

	/* input part */
	input = (struct virtio_crypto_session_input *)
		((uint8_t *)virt_addr_started + input_offset);
	input->status = VIRTIO_CRYPTO_ERR;
	input->session_id = ~0ULL;
	desc[idx].addr = phys_addr_started + len_total;
	desc[idx].len = len_session_input;
	desc[idx].flags = VRING_DESC_F_WRITE;
	idx++;

	/* use a single desc entry */
	vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
	vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
			vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

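	/*
	 * The device has consumed the request; walk the used ring and hand
	 * the completed descriptor chain back to the free list so the
	 * control queue entry can be reused.
	 */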
	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
			"vq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	/* get the result */
	if (input->status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Something went wrong on the "
				"backend! status=%u, session_id=%" PRIu64 "",
				input->status, input->session_id);
		rte_free(virt_addr_started);
		ret = -1;
	} else {
		session->session_id = input->session_id;

		VIRTIO_CRYPTO_SESSION_LOG_INFO("Session created successfully, "
				"session_id=%" PRIu64 "", input->session_id);
		rte_free(virt_addr_started);
		ret = 0;
	}

	return ret;
}

void
virtio_crypto_queue_release(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (vq) {
		hw = vq->hw;
		/* Select and deactivate the queue */
		VTPCI_OPS(hw)->del_queue(hw, vq);

		rte_memzone_free(vq->mz);
		rte_mempool_free(vq->mpool);
		rte_free(vq);
	}
}

#define MPOOL_MAX_NAME_SZ 32

int
virtio_crypto_queue_setup(struct rte_cryptodev *dev,
		int queue_type,
		uint16_t vtpci_queue_idx,
		uint16_t nb_desc,
		int socket_id,
		struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char mpool_name[MPOOL_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	unsigned int vq_size, size;
	struct virtio_crypto_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = NULL;
	uint32_t i = 0;
	uint32_t j;

	PMD_INIT_FUNC_TRACE();

	VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; a size of 0 means the
	 * virtqueue does not exist.
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	if (vq_size == 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
		return -EINVAL;
	}
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);

	if (!rte_is_power_of_2(vq_size)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
		return -EINVAL;
	}

	if (queue_type == VTCRYPTO_DATAQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
				dev->data->dev_id, vtpci_queue_idx);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_dataqueue%d_mpool",
				dev->data->dev_id, vtpci_queue_idx);
	} else if (queue_type == VTCRYPTO_CTRLQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
				dev->data->dev_id);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_controlqueue_mpool",
				dev->data->dev_id);
	}
	size = RTE_ALIGN_CEIL(sizeof(*vq) +
			vq_size * sizeof(struct vq_desc_extra),
			RTE_CACHE_LINE_SIZE);
	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
			socket_id);
	if (vq == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
		return -ENOMEM;
	}

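	/*
	 * Note: rte_mempool_lookup() runs first so that a cookie pool left
	 * over from an earlier setup of this same queue is reused instead of
	 * being recreated (mempool names must be unique system-wide).
	 */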
	if (queue_type == VTCRYPTO_DATAQ) {
		/* pre-allocate a mempool and use it in the data plane to
		 * improve performance
		 */
		vq->mpool = rte_mempool_lookup(mpool_name);
		if (vq->mpool == NULL)
			vq->mpool = rte_mempool_create(mpool_name,
					vq_size,
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE, 0,
					NULL, NULL, NULL, NULL, socket_id,
					0);
		if (!vq->mpool) {
			VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
					"Cannot create mempool");
			goto mpool_create_err;
		}
		for (i = 0; i < vq_size; i++) {
			vq->vq_descx[i].cookie =
				rte_zmalloc("crypto PMD op cookie pointer",
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE);
			if (vq->vq_descx[i].cookie == NULL) {
				VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
						"alloc mem for cookie");
				goto cookie_alloc_err;
			}
		}
	}

	vq->hw = hw;
	vq->dev_id = dev->data->dev_id;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Using part of the vring entries is permitted, but the maximum
	 * is vq_size
	 */
	if (nb_desc == 0 || nb_desc > vq_size)
		nb_desc = vq_size;
	vq->vq_free_cnt = nb_desc;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
			(queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
			size, vq->vq_ring_size);

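	/*
	 * Reserve-or-lookup: if the memzone already exists (EEXIST), this
	 * queue has presumably been set up before, so reuse the existing
	 * zone instead of failing.
	 */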
	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
			goto mz_reserve_err;
		}
	}

	/*
	 * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts a 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((mz->iova + vq->vq_ring_size - 1)
			>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
				"above 16TB!");
		goto vring_addr_err;
	}

	memset(mz->addr, 0, mz->len);
	vq->mz = mz;
	vq->vq_ring_mem = mz->iova;
	vq->vq_ring_virt_mem = mz->addr;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
			(uint64_t)mz->iova);
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
			(uint64_t)(uintptr_t)mz->addr);

	*pvq = vq;

	return 0;

vring_addr_err:
	rte_memzone_free(mz);
mz_reserve_err:
cookie_alloc_err:
	rte_mempool_free(vq->mpool);
	if (i != 0) {
		for (j = 0; j < i; j++)
			rte_free(vq->vq_descx[j].cookie);
	}
mpool_create_err:
	rte_free(vq);
	return -ENOMEM;
}

static int
virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
{
	int ret;
	struct virtqueue *vq;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	/* if virtio device has started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
			0, SOCKET_ID_ANY, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
		return ret;
	}

	hw->cvq = vq;

	return 0;
}

static void
virtio_crypto_free_queues(struct rte_cryptodev *dev)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* control queue release */
	virtio_crypto_queue_release(hw->cvq);

	/* data queue release */
	for (i = 0; i < hw->max_dataqueues; i++)
		virtio_crypto_queue_release(dev->data->queue_pairs[i]);
}

static int
virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
	/* Device related operations */
	.dev_configure = virtio_crypto_dev_configure,
	.dev_start = virtio_crypto_dev_start,
	.dev_stop = virtio_crypto_dev_stop,
	.dev_close = virtio_crypto_dev_close,
	.dev_infos_get = virtio_crypto_dev_info_get,

	.stats_get = virtio_crypto_dev_stats_get,
	.stats_reset = virtio_crypto_dev_stats_reset,

	.queue_pair_setup = virtio_crypto_qp_setup,
	.queue_pair_release = virtio_crypto_qp_release,

	/* Crypto related operations */
	.sym_session_get_size = virtio_crypto_sym_get_session_private_size,
	.sym_session_configure = virtio_crypto_sym_configure_session,
	.sym_session_clear = virtio_crypto_sym_clear_session
};

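/*
 * Statistics are accumulated per data virtqueue by the enqueue/dequeue
 * burst paths (packets_sent_* / packets_received_*); this helper merely
 * sums them into the rte_cryptodev_stats structure.
 */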
static void
virtio_crypto_update_stats(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (stats == NULL) {
		VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
		return;
	}

	for (i = 0; i < hw->max_dataqueues; i++) {
		const struct virtqueue *data_queue
			= dev->data->queue_pairs[i];
		if (data_queue == NULL)
			continue;

		stats->enqueued_count += data_queue->packets_sent_total;
		stats->enqueue_err_count += data_queue->packets_sent_failed;

		stats->dequeued_count += data_queue->packets_received_total;
		stats->dequeue_err_count
			+= data_queue->packets_received_failed;
	}
}

static void
virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	PMD_INIT_FUNC_TRACE();

	virtio_crypto_update_stats(dev, stats);
}

static void
virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < hw->max_dataqueues; i++) {
		struct virtqueue *data_queue = dev->data->queue_pairs[i];
		if (data_queue == NULL)
			continue;

		data_queue->packets_sent_total = 0;
		data_queue->packets_sent_failed = 0;

		data_queue->packets_received_total = 0;
		data_queue->packets_received_failed = 0;
	}
}

static int
virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	int ret;
	struct virtqueue *vq;

	PMD_INIT_FUNC_TRACE();

	/* if virtio dev is started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
			qp_conf->nb_descriptors, socket_id, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"virtio crypto data queue initialization failed");
		return ret;
	}

	dev->data->queue_pairs[queue_pair_id] = vq;

	return 0;
}

static int
virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct virtqueue *vq
		= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (vq == NULL) {
		VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
		return 0;
	}

	virtio_crypto_queue_release(vq);
	return 0;
}

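/*
 * Virtio feature negotiation: offer the driver's requested feature set,
 * read the device's feature bits, and keep the intersection. For modern
 * devices, VIRTIO_F_VERSION_1 must survive negotiation and the device
 * must accept the FEATURES_OK status write.
 */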
static int
virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	PMD_INIT_FUNC_TRACE();

	/* Prepare guest_features: features the driver wants to support */
	VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
			req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
			host_features);

	/*
	 * Negotiate features: the subset of device feature bits that the
	 * driver also supports is written back as the guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
			host_features);
	VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
			hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_cryptodev_set_status(hw,
				VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_cryptodev_get_status(hw) &
				VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
					"status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}

/* reset device and renegotiate features if needed */
static int
virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
		uint64_t req_features)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
	struct virtio_crypto_config local_config;
	struct virtio_crypto_config *config = &local_config;

	PMD_INIT_FUNC_TRACE();

	/* Reset the device, although that is not strictly necessary at startup */
	vtpci_cryptodev_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	/* Get status of the device */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, status),
		&config->status, sizeof(config->status));
	if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
		VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
				"not ready");
		return -1;
	}

	/* Get number of data queues */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, max_dataqueues),
		&config->max_dataqueues,
		sizeof(config->max_dataqueues));
	hw->max_dataqueues = config->max_dataqueues;

	VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
			hw->max_dataqueues);

	return 0;
}

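/*
 * The initialization above follows the virtio spec handshake:
 * reset -> ACKNOWLEDGE -> DRIVER -> feature negotiation/FEATURES_OK.
 * DRIVER_OK is only raised once the device is started (see
 * virtio_crypto_dev_start()).
 */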
/*
 * This function is based on the probe() function.
 * It returns 0 on success.
 */
static int
crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *cryptodev;
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
					init_params);
	if (cryptodev == NULL)
		return -ENODEV;

	cryptodev->driver_id = cryptodev_virtio_driver_id;
	cryptodev->dev_ops = &virtio_crypto_dev_ops;

	cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
	cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;

	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	hw = cryptodev->data->dev_private;
	hw->dev_id = cryptodev->data->dev_id;
	hw->virtio_dev_capabilities = virtio_capabilities;

	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
		cryptodev->data->dev_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	/* pci device init */
	if (vtpci_cryptodev_init(pci_dev, hw))
		return -1;

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	rte_cryptodev_pmd_probing_finish(cryptodev);

	return 0;
}

static int
virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	if (cryptodev->data->dev_started) {
		virtio_crypto_dev_stop(cryptodev);
		virtio_crypto_dev_close(cryptodev);
	}

	cryptodev->dev_ops = NULL;
	cryptodev->enqueue_burst = NULL;
	cryptodev->dequeue_burst = NULL;

	/* release control queue */
	virtio_crypto_queue_release(hw->cvq);

	rte_free(cryptodev->data);
	cryptodev->data = NULL;

	VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");

	return 0;
}

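/*
 * Typical application-side bring-up, as a sketch (not part of this
 * driver; the field values below are illustrative only):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = 0, .nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 128 };
 *
 *	rte_cryptodev_configure(dev_id, &conf);            -> dev_configure
 *	rte_cryptodev_queue_pair_setup(dev_id, 0,
 *			&qp_conf, 0);                      -> qp_setup
 *	rte_cryptodev_start(dev_id);                       -> dev_start
 */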
static int
virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
		struct rte_cryptodev_config *config __rte_unused)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	/*
	 * Set up the control queue: [0, 1, ..., (config->max_dataqueues - 1)]
	 * are data queues; queue config->max_dataqueues is the control queue.
	 */
	if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
		return -1;
	}
	virtio_crypto_ctrlq_start(cryptodev);

	return 0;
}

static void
virtio_crypto_dev_stop(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");

	vtpci_cryptodev_reset(hw);

	virtio_crypto_dev_free_mbufs(dev);
	virtio_crypto_free_queues(dev);

	dev->data->dev_started = 0;
}

static int
virtio_crypto_dev_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (dev->data->dev_started)
		return 0;

	/* Do final configuration before queue engine starts */
	virtio_crypto_dataq_start(dev);
	vtpci_cryptodev_reinit_complete(hw);

	dev->data->dev_started = 1;

	return 0;
}

static void
virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
{
	uint32_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	for (i = 0; i < hw->max_dataqueues; i++) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
				"and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)
				dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
				i, dev->data->queue_pairs[i]);

		virtqueue_detatch_unused(dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
				"unused buf", i);
		VIRTQUEUE_DUMP(
				(struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

static unsigned int
virtio_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
}

static int
virtio_crypto_check_sym_session_paras(
		struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(dev == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
		return -1;
	}
	if (unlikely(dev->data == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
		return -1;
	}
	hw = dev->data->dev_private;
	if (unlikely(hw == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
		return -1;
	}
	if (unlikely(hw->cvq == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
		return -1;
	}

	return 0;
}

static int
virtio_crypto_check_sym_clear_session_paras(
		struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();

	if (sess == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
		return -1;
	}

	return virtio_crypto_check_sym_session_paras(dev);
}

#define NUM_ENTRY_SYM_CLEAR_SESSION 2

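/*
 * Destroy-session requests use two indirect descriptors: a device-readable
 * ctrl request followed by a device-writable virtio_crypto_inhdr carrying
 * back the status byte.
 */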
static void
virtio_crypto_sym_clear_session(
		struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct virtio_crypto_hw *hw;
	struct virtqueue *vq;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct vring_desc *desc;
	uint8_t *status;
	uint8_t needed = 1;
	uint32_t head;
	uint8_t *malloc_virt_addr;
	uint64_t malloc_phys_addr;
	uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
	uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t desc_offset = len_op_ctrl_req + len_inhdr;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
		return;

	hw = dev->data->dev_private;
	vq = hw->cvq;
	session = (struct virtio_crypto_session *)sess->driver_priv_data;

	VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
			"vq = %p", vq->vq_desc_head_idx, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"vq->vq_free_cnt = %d is less than %d, "
				"not enough", vq->vq_free_cnt, needed);
		return;
	}

	/*
	 * malloc memory to store information of ctrl request op,
	 * returned status and desc vring
	 */
	malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
		+ NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (malloc_virt_addr == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
		return;
	}
	malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);

	/* assign ctrl request op part */
	ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
	ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
	/* default data virtqueue is 0 */
	ctrl->header.queue_id = 0;
	ctrl->u.destroy_session.session_id = session->session_id;

	/* status part */
	status = &(((struct virtio_crypto_inhdr *)
		((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
	*status = VIRTIO_CRYPTO_ERR;

	/* indirect desc vring part */
	desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
		+ desc_offset);

	/* ctrl request part */
	desc[0].addr = malloc_phys_addr;
	desc[0].len = len_op_ctrl_req;
	desc[0].flags = VRING_DESC_F_NEXT;
	desc[0].next = 1;

	/* status part */
	desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
	desc[1].len = len_inhdr;
	desc[1].flags = VRING_DESC_F_WRITE;

	/* use only a single desc entry */
	head = vq->vq_desc_head_idx;
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
	vq->vq_ring.desc[head].len
		= NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc);
	vq->vq_free_cnt -= needed;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
			vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;
		while (vq->vq_ring.desc[desc_idx].flags
				& VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;
		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

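	/*
	 * The status byte was preset to VIRTIO_CRYPTO_ERR before the request
	 * was posted, so anything other than VIRTIO_CRYPTO_OK here means the
	 * device either rejected the request or never wrote a result.
	 */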
	if (*status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
				"status=%u, session_id=%"PRIu64"",
				*status, session->session_id);
		rte_free(malloc_virt_addr);
		return;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
			"vq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	VIRTIO_CRYPTO_SESSION_LOG_INFO("Session %"PRIu64" closed successfully",
			session->session_id);

	rte_free(malloc_virt_addr);
}

static struct rte_crypto_cipher_xform *
virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_auth_xform *
virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

/** Get xform chain order */
static int
virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return -1;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next == NULL)
		return VIRTIO_CRYPTO_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next == NULL)
		return VIRTIO_CRYPTO_CMD_AUTH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return VIRTIO_CRYPTO_CMD_HASH_CIPHER;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return VIRTIO_CRYPTO_CMD_CIPHER_HASH;

	return -1;
}

static int
virtio_crypto_sym_pad_cipher_param(
		struct virtio_crypto_cipher_session_para *para,
		struct rte_crypto_cipher_xform *cipher_xform)
{
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
				"Cipher alg %u", cipher_xform->algo);
		return -1;
	}

	para->keylen = cipher_xform->key.length;
	switch (cipher_xform->op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		para->op = VIRTIO_CRYPTO_OP_DECRYPT;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
				"parameter");
		return -1;
	}

	return 0;
}

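/*
 * Hash parameters live in one of two places in the chain request,
 * depending on hash_mode: plain hash (HASH_MODE_PLAIN, no key) uses
 * hash_param, keyed MAC (HASH_MODE_AUTH) uses mac_param. The mode is
 * selected in virtio_crypto_sym_pad_op_ctrl_req() based on whether the
 * auth xform carries a key.
 */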
static int
virtio_crypto_sym_pad_auth_param(
		struct virtio_crypto_op_ctrl_req *ctrl,
		struct rte_crypto_auth_xform *auth_xform)
{
	uint32_t *algo;
	struct virtio_crypto_alg_chain_session_para *para =
		&(ctrl->u.sym_create_session.u.chain.para);

	switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
	case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
		algo = &(para->u.hash_param.algo);
		break;
	case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
		algo = &(para->u.mac_param.algo);
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
				"specified",
				ctrl->u.sym_create_session.u.chain.para.hash_mode);
		return -1;
	}

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Crypto: Undefined Hash algo %u specified",
			auth_xform->algo);
		return -1;
	}

	return 0;
}

static int
virtio_crypto_sym_pad_op_ctrl_req(
		struct virtio_crypto_op_ctrl_req *ctrl,
		struct rte_crypto_sym_xform *xform, bool is_chained,
		uint8_t *cipher_key_data, uint8_t *auth_key_data,
		struct virtio_crypto_session *session)
{
	int ret;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = virtio_crypto_get_cipher_xform(xform);
	if (cipher_xform) {
		if (cipher_xform->key.length > VIRTIO_CRYPTO_MAX_KEY_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher key size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_KEY_SIZE);
			return -1;
		}
		if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher IV size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_IV_SIZE);
			return -1;
		}
		if (is_chained)
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.chain.para
					.cipher_param, cipher_xform);
		else
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.cipher.para,
				cipher_xform);

		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"pad cipher parameter failed");
			return -1;
		}

		memcpy(cipher_key_data, cipher_xform->key.data,
			cipher_xform->key.length);

		session->iv.offset = cipher_xform->iv.offset;
		session->iv.length = cipher_xform->iv.length;
	}

	/* Get auth xform from crypto xform chain */
	auth_xform = virtio_crypto_get_auth_xform(xform);
	if (auth_xform) {
		/* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
		struct virtio_crypto_alg_chain_session_para *para =
			&(ctrl->u.sym_create_session.u.chain.para);
		if (auth_xform->key.length) {
			if (auth_xform->key.length >
					VIRTIO_CRYPTO_MAX_KEY_SIZE) {
				VIRTIO_CRYPTO_SESSION_LOG_ERR(
					"auth key size cannot be longer than %u",
					VIRTIO_CRYPTO_MAX_KEY_SIZE);
				return -1;
			}
			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
			para->u.mac_param.auth_key_len =
				(uint32_t)auth_xform->key.length;
			para->u.mac_param.hash_result_len =
				auth_xform->digest_length;
			memcpy(auth_key_data, auth_xform->key.data,
				auth_xform->key.length);
		} else {
			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
			para->u.hash_param.hash_result_len =
				auth_xform->digest_length;
		}

		ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
				"failed");
			return -1;
		}
	}

	return 0;
}

static int
virtio_crypto_check_sym_configure_session_paras(
		struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sym_sess)
{
	if (unlikely(xform == NULL) || unlikely(sym_sess == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
		return -1;
	}

	if (virtio_crypto_check_sym_session_paras(dev) < 0)
		return -1;

	return 0;
}

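/*
 * Session creation flow: derive the chain order from the xform list,
 * fill in the ctrl request and copy out the keys, then post the request
 * on the control queue; the device answers with a session_id that is
 * stored in the virtio_crypto_session.
 */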
static int
virtio_crypto_sym_configure_session(
		struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess)
{
	int ret;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl_req;
	enum virtio_crypto_cmd_id cmd_id;
	uint8_t cipher_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	uint8_t auth_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	struct virtio_crypto_hw *hw;
	struct virtqueue *control_vq;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
			sess);
	if (ret < 0) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
		return ret;
	}
	session = (struct virtio_crypto_session *)sess->driver_priv_data;
	memset(session, 0, sizeof(struct virtio_crypto_session));
	ctrl_req = &session->ctrl;
	ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
	/* FIXME: support multiqueue */
	ctrl_req->header.queue_id = 0;

	hw = dev->data->dev_private;
	control_vq = hw->cvq;

	cmd_id = virtio_crypto_get_chain_order(xform);
	if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
	if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;

	switch (cmd_id) {
	case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
	case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
			xform, true, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	case VIRTIO_CRYPTO_CMD_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_CIPHER;
		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
			false, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, NULL, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Unsupported operation chain order parameter");
		goto error_out;
	}
	return 0;

error_out:
	return -1;
}

static void
virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->driver_id = cryptodev_virtio_driver_id;
		info->feature_flags = dev->feature_flags;
		info->max_nb_queue_pairs = hw->max_dataqueues;
		/* No limit on the number of sessions */
		info->sym.max_nb_sessions = 0;
		info->capabilities = hw->virtio_dev_capabilities;
	}
}

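/*
 * PCI bus glue: the cryptodev is named after the PCI address
 * (rte_pci_device_name()), which is how crypto_virtio_pci_remove()
 * finds the same device again via rte_cryptodev_pmd_get_named_dev().
 */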
static int
crypto_virtio_pci_probe(
	struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = pci_dev->device.numa_node,
		.private_data_size = sizeof(struct virtio_crypto_hw)
	};
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return crypto_virtio_create(name, pci_dev, &init_params);
}

static int
crypto_virtio_pci_remove(
	struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, cryptodev_name,
			sizeof(cryptodev_name));

	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
	if (cryptodev == NULL)
		return -ENODEV;

	return virtio_crypto_dev_uninit(cryptodev);
}

static struct rte_pci_driver rte_virtio_crypto_driver = {
	.id_table = pci_id_virtio_crypto_map,
	.drv_flags = 0,
	.probe = crypto_virtio_pci_probe,
	.remove = crypto_virtio_pci_remove
};

static struct cryptodev_driver virtio_crypto_drv;

RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
	rte_virtio_crypto_driver.driver,
	cryptodev_virtio_driver_id);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_session, session, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_rx, rx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_tx, tx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_driver, driver, NOTICE);