/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "vbdev_crypto.h"

#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"
#include "spdk/log.h"

#include <rte_config.h>
#include <rte_version.h>
#include <rte_bus_vdev.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

/* To add support for new device types, follow the examples of the following...
 * Note that the string names are defined by the DPDK PMD in question so be
 * sure to use the exact names.
 */
#define MAX_NUM_DRV_TYPES 2

/* The VF spread is the number of queue pairs between virtual functions, we use this to
 * load balance the QAT device.
 */
#define QAT_VF_SPREAD 32
static uint8_t g_qat_total_qp = 0;
static uint8_t g_next_qat_index;

const char *g_driver_names[MAX_NUM_DRV_TYPES] = { AESNI_MB, QAT };

/* Global list of available crypto devices. */
struct vbdev_dev {
	struct rte_cryptodev_info	cdev_info;	/* includes device friendly name */
	uint8_t				cdev_id;	/* identifier for the device */
	TAILQ_ENTRY(vbdev_dev)		link;
};
static TAILQ_HEAD(, vbdev_dev) g_vbdev_devs = TAILQ_HEAD_INITIALIZER(g_vbdev_devs);
/* Global list and lock for unique device/queue pair combos. We keep 1 list per supported PMD
 * so that we can optimize per PMD where it makes sense. For example, with QAT there is an
 * optimal pattern for assigning queue pairs where with AESNI there is not.
 */
struct device_qp {
	struct vbdev_dev	*device;	/* ptr to crypto device */
	uint8_t			qp;		/* queue pair for this node */
	bool			in_use;		/* whether this node is in use or not */
	uint8_t			index;		/* used by QAT to load balance placement of qpairs */
	TAILQ_ENTRY(device_qp)	link;
};
static TAILQ_HEAD(, device_qp) g_device_qp_qat = TAILQ_HEAD_INITIALIZER(g_device_qp_qat);
static TAILQ_HEAD(, device_qp) g_device_qp_aesni_mb = TAILQ_HEAD_INITIALIZER(g_device_qp_aesni_mb);
static pthread_mutex_t g_device_qp_lock = PTHREAD_MUTEX_INITIALIZER;

/* In order to limit the number of resources we need to do one crypto
 * operation per LBA (we use LBA as IV), we tell the bdev layer that
 * our max IO size is something reasonable. Units here are in bytes.
 */
#define CRYPTO_MAX_IO		(64 * 1024)

/* This controls how many ops will be dequeued from the crypto driver in one run
 * of the poller. It is mainly a performance knob as it effectively determines how
 * much work the poller has to do. However even that can vary between crypto drivers
 * as the AESNI_MB driver for example does all the crypto work on dequeue whereas the
 * QAT driver just dequeues what has been completed already.
 */
#define MAX_DEQUEUE_BURST_SIZE	64

/* When enqueueing, we need to supply the crypto driver with an array of pointers to
 * operation structs. As each of these can be max 512B, we can adjust the CRYPTO_MAX_IO
 * value in conjunction with the other defines to make sure we're not using crazy amounts
 * of memory. All of these numbers can and probably should be adjusted based on the
 * workload. By default we'll use the worst case (smallest) block size for the
 * minimum number of array entries. As an example, a CRYPTO_MAX_IO size of 64K with 512B
 * blocks would give us an enqueue array size of 128.
 */
#define MAX_ENQUEUE_ARRAY_SIZE	(CRYPTO_MAX_IO / 512)

/* The number of MBUFS we need must be a power of two and to support other small IOs
 * in addition to the limits mentioned above, we go to the next power of two. It is a
 * big number because it is one mempool for source and destination mbufs. It may
 * need to be bigger to support multiple crypto drivers at once.
 */
#define NUM_MBUFS		32768
#define POOL_CACHE_SIZE		256
#define MAX_CRYPTO_VOLUMES	128
#define NUM_SESSIONS		(2 * MAX_CRYPTO_VOLUMES)
#define SESS_MEMPOOL_CACHE_SIZE	0
uint8_t g_number_of_claimed_volumes = 0;

/* This is the max number of IOs we can supply to any crypto device QP at one time.
 * It can vary between drivers.
 */
#define CRYPTO_QP_DESCRIPTORS	2048

/* Specific to AES_CBC. */
#define AES_CBC_IV_LENGTH	16
#define AES_CBC_KEY_LENGTH	16
#define AES_XTS_KEY_LENGTH	16	/* XTS uses 2 keys, each of this size. */
#define AESNI_MB_NUM_QP		64
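/* A minimal compile-time sanity sketch for the sizing assumptions stated
 * above (illustrative, not part of the original module; assumes a C11
 * compiler for _Static_assert):
 *
 *	_Static_assert((NUM_MBUFS & (NUM_MBUFS - 1)) == 0,
 *		       "NUM_MBUFS must be a power of two");
 *	_Static_assert(MAX_ENQUEUE_ARRAY_SIZE == 128,
 *		       "64K max IO with 512B blocks yields 128 array entries");
 */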
/* Common for supported devices. */
#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
				 sizeof(struct rte_crypto_sym_op))
#define QUEUED_OP_OFFSET	(IV_OFFSET + AES_CBC_IV_LENGTH)

static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void vbdev_crypto_examine(struct spdk_bdev *bdev);
static int vbdev_crypto_claim(const char *bdev_name);
static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);

/* List of crypto_bdev names and their base bdevs via configuration file. */
struct bdev_names {
	char			*vbdev_name;	/* name of the vbdev to create */
	char			*bdev_name;	/* base bdev name */

	/* Note, for dev/test we allow use of key in the config file, for production
	 * use, you must use an RPC to specify the key for security reasons.
	 */
	uint8_t			*key;		/* key per bdev */
	char			*drv_name;	/* name of the crypto device driver */
	char			*cipher;	/* AES_CBC or AES_XTS */
	uint8_t			*key2;		/* key #2 for AES_XTS, per bdev */
	TAILQ_ENTRY(bdev_names)	link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. We keep the device friendly name here even
 * though it's also in the device struct because we use it early on.
 */
struct vbdev_crypto {
	struct spdk_bdev		*base_bdev;	/* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc;	/* its descriptor we get from open */
	struct spdk_bdev		crypto_bdev;	/* the crypto virtual bdev */
	uint8_t				*key;		/* key per bdev */
	uint8_t				*key2;		/* for XTS */
	uint8_t				*xts_key;	/* key + key2 */
	char				*drv_name;	/* name of the crypto device driver */
	char				*cipher;	/* cipher used */
	struct rte_cryptodev_sym_session *session_encrypt;	/* encryption session for this bdev */
	struct rte_cryptodev_sym_session *session_decrypt;	/* decryption session for this bdev */
	struct rte_crypto_sym_xform	cipher_xform;	/* crypto control struct for this bdev */
	TAILQ_ENTRY(vbdev_crypto)	link;
	struct spdk_thread		*thread;	/* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto);

/* Shared mempools between all devices on this system */
static struct rte_mempool *g_session_mp = NULL;
static struct rte_mempool *g_session_mp_priv = NULL;
static struct spdk_mempool *g_mbuf_mp = NULL;		/* mbuf mempool */
static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */

/* For queueing up crypto operations that we can't submit for some reason */
struct vbdev_crypto_op {
	uint8_t				cdev_id;
	uint8_t				qp;
	struct rte_crypto_op		*crypto_op;
	struct spdk_bdev_io		*bdev_io;
	TAILQ_ENTRY(vbdev_crypto_op)	link;
};
#define QUEUED_OP_LENGTH (sizeof(struct vbdev_crypto_op))
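/* For orientation, the private area that rides behind each rte_crypto_op in
 * g_crypto_op_mp is laid out as [IV][queued-op struct] and is addressed with
 * the offsets defined above. A minimal sketch of how each region is reached
 * (op stands for a hypothetical, already-allocated operation):
 *
 *	struct rte_crypto_op *op = ...;
 *	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	struct vbdev_crypto_op *queued = rte_crypto_op_ctod_offset(op,
 *			struct vbdev_crypto_op *, QUEUED_OP_OFFSET);
 */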
/* The crypto vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * We store things in here that are needed on per thread basis like the base_channel for this thread,
 * and the poller for this thread.
 */
struct crypto_io_channel {
	struct spdk_io_channel		*base_ch;	/* IO channel of base device */
	struct spdk_poller		*poller;	/* completion poller */
	struct device_qp		*device_qp;	/* unique device/qp combination for this channel */
	TAILQ_HEAD(, spdk_bdev_io)	pending_cry_ios;	/* outstanding operations to the crypto device */
	struct spdk_io_channel_iter	*iter;		/* used with for_each_channel in reset */
	TAILQ_HEAD(, vbdev_crypto_op)	queued_cry_ops;	/* queued for re-submission to CryptoDev */
};

/* This is the crypto per IO context that the bdev layer allocates for us opaquely and attaches to
 * each IO for us.
 */
struct crypto_bdev_io {
	int cryop_cnt_remaining;		/* counter used when completing crypto ops */
	struct crypto_io_channel *crypto_ch;	/* need to store for crypto completion handling */
	struct vbdev_crypto *crypto_bdev;	/* the crypto node struct associated with this IO */
	struct spdk_bdev_io *orig_io;		/* the original IO */
	struct spdk_bdev_io *read_io;		/* the read IO we issued */
	int8_t bdev_io_status;			/* the status we'll report back on the bdev IO */
	bool on_pending_list;
	/* Used for the single contiguous buffer that serves as the crypto destination target for writes */
	uint64_t aux_num_blocks;		/* num of blocks for the contiguous buffer */
	uint64_t aux_offset_blocks;		/* block offset on media */
	void *aux_buf_raw;			/* raw buffer that the bdev layer gave us for write buffer */
	struct iovec aux_buf_iov;		/* iov representing aligned contig write buffer */

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *ch;
};

/* Called by vbdev_crypto_init_crypto_drivers() to init each discovered crypto device */
static int
create_vbdev_dev(uint8_t index, uint16_t num_lcores)
{
	struct vbdev_dev *device;
	uint8_t j, cdev_id, cdrv_id;
	struct device_qp *dev_qp;
	struct device_qp *tmp_qp;
	int rc;
	TAILQ_HEAD(device_qps, device_qp) *dev_qp_head;

	device = calloc(1, sizeof(struct vbdev_dev));
	if (!device) {
		return -ENOMEM;
	}

	/* Get details about this device. */
	rte_cryptodev_info_get(index, &device->cdev_info);
	cdrv_id = device->cdev_info.driver_id;
	cdev_id = device->cdev_id = index;

	/* Before going any further, make sure we have enough resources for this
	 * device type to function. We need a unique queue pair per core across each
	 * device type to remain lockless....
	 */
	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
			    device->cdev_info.driver_name);
		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
		rc = -EINVAL;
		goto err;
	}
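	/* A worked example of the check above, with assumed numbers: a host
	 * running 16 lcores with one AESNI_MB vdev created with
	 * max_nb_queue_pairs = AESNI_MB_NUM_QP (64) gives 1 * 64 >= 16, so
	 * the device passes. Two hypothetical QAT VFs exposing 2 qpairs each
	 * (2 * 2 = 4 < 16) would fail on the same host and require more VFs
	 * or fewer cores.
	 */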
	/* Setup queue pairs. */
	struct rte_cryptodev_config conf = {
		.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs,
		.socket_id = SPDK_ENV_SOCKET_ID_ANY
	};

	rc = rte_cryptodev_configure(cdev_id, &conf);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to configure cryptodev %u\n", cdev_id);
		rc = -EINVAL;
		goto err;
	}

	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = CRYPTO_QP_DESCRIPTORS,
#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
		.mp_session = g_session_mp,
		.mp_session_private = g_session_mp_priv,
#endif
	};

	/* Pre-setup all potential qpairs now and assign them in the channel
	 * callback. If we were to create them there, we'd have to stop the
	 * entire device affecting all other threads that might be using it
	 * even on other queue pairs.
	 */
	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
#else
		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY,
						    g_session_mp);
#endif

		if (rc < 0) {
			SPDK_ERRLOG("Failed to setup queue pair %u on "
				    "cryptodev %u\n", j, cdev_id);
			rc = -EINVAL;
			goto err;
		}
	}

	rc = rte_cryptodev_start(cdev_id);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to start device %u: error %d\n",
			    cdev_id, rc);
		rc = -EINVAL;
		goto err;
	}

	/* Select the right device/qp list based on driver name
	 * or error if it does not exist.
	 */
	if (strcmp(device->cdev_info.driver_name, QAT) == 0) {
		dev_qp_head = (struct device_qps *)&g_device_qp_qat;
	} else if (strcmp(device->cdev_info.driver_name, AESNI_MB) == 0) {
		dev_qp_head = (struct device_qps *)&g_device_qp_aesni_mb;
	} else {
		rc = -EINVAL;
		goto err;
	}

	/* Build up lists of device/qp combinations per PMD */
	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
		dev_qp = calloc(1, sizeof(struct device_qp));
		if (!dev_qp) {
			rc = -ENOMEM;
			goto err_qp_alloc;
		}
		dev_qp->device = device;
		dev_qp->qp = j;
		dev_qp->in_use = false;
		if (strcmp(device->cdev_info.driver_name, QAT) == 0) {
			g_qat_total_qp++;
		}
		TAILQ_INSERT_TAIL(dev_qp_head, dev_qp, link);
	}

	/* Add to our list of available crypto devices. */
	TAILQ_INSERT_TAIL(&g_vbdev_devs, device, link);

	return 0;
err_qp_alloc:
	TAILQ_FOREACH_SAFE(dev_qp, dev_qp_head, link, tmp_qp) {
		TAILQ_REMOVE(dev_qp_head, dev_qp, link);
		free(dev_qp);
	}
err:
	free(device);

	return rc;
}
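/* Per the note at the top of this file, wiring up an additional PMD would
 * mean extending the driver table and adding a per-PMD qp list. A sketch,
 * using a hypothetical "MLX5" entry (the exact string name is defined by the
 * DPDK PMD in question):
 *
 *	#define MLX5 "mlx5_pci"
 *	const char *g_driver_names[] = { AESNI_MB, QAT, MLX5 };
 *	static TAILQ_HEAD(, device_qp) g_device_qp_mlx5 =
 *		TAILQ_HEAD_INITIALIZER(g_device_qp_mlx5);
 *
 * plus a matching strcmp() branch in create_vbdev_dev() and
 * _assign_device_qp() below.
 */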
/* This is called from the module's init function. We setup all crypto devices early on as we are unable
 * to easily dynamically configure queue pairs after the drivers are up and running. So, here, we
 * configure the max capabilities of each device and assign threads to queue pairs as channels are
 * requested.
 */
static int
vbdev_crypto_init_crypto_drivers(void)
{
	uint8_t cdev_count;
	uint8_t cdev_id;
	int i, rc = 0;
	struct vbdev_dev *device;
	struct vbdev_dev *tmp_dev;
	struct device_qp *dev_qp;
	unsigned int max_sess_size = 0, sess_size;
	uint16_t num_lcores = rte_lcore_count();
	char aesni_args[32];

	/* Only the first call, via RPC or module init, should init the crypto drivers. */
	if (g_session_mp != NULL) {
		return 0;
	}

	/* We always init AESNI_MB */
	snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d", AESNI_MB_NUM_QP);
	rc = rte_vdev_init(AESNI_MB, aesni_args);
	if (rc) {
		SPDK_ERRLOG("error creating virtual PMD %s\n", AESNI_MB);
		return -EINVAL;
	}

	/* If we have no crypto devices, there's no reason to continue. */
	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		return 0;
	}

	/*
	 * Create global mempools, shared by all devices regardless of type.
	 */

	/* First determine max session size, most pools are shared by all the devices,
	 * so we need to find the global max sessions size.
	 */
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size) {
			max_sess_size = sess_size;
		}
	}

#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
	g_session_mp_priv = rte_mempool_create("session_mp_priv", NUM_SESSIONS, max_sess_size,
					       SESS_MEMPOOL_CACHE_SIZE, 0, NULL, NULL, NULL,
					       NULL, SOCKET_ID_ANY, 0);
	if (g_session_mp_priv == NULL) {
		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
		return -ENOMEM;
	}

	g_session_mp = rte_cryptodev_sym_session_pool_create(
			       "session_mp",
			       NUM_SESSIONS, 0, SESS_MEMPOOL_CACHE_SIZE, 0,
			       SOCKET_ID_ANY);
#else
	g_session_mp = rte_mempool_create("session_mp", NUM_SESSIONS, max_sess_size,
					  SESS_MEMPOOL_CACHE_SIZE,
					  0, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
#endif
	if (g_session_mp == NULL) {
		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
		rc = -ENOMEM;
		goto error_create_session_mp;
	}

	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					SPDK_ENV_SOCKET_ID_ANY);
	if (g_mbuf_mp == NULL) {
		SPDK_ERRLOG("Cannot create mbuf pool\n");
		rc = -ENOMEM;
		goto error_create_mbuf;
	}

	/* We use per op private data to store the IV and our own struct
	 * for queueing ops.
	 */
	g_crypto_op_mp = rte_crypto_op_pool_create("op_mp",
			 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			 NUM_MBUFS,
			 POOL_CACHE_SIZE,
			 AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH,
			 rte_socket_id());

	if (g_crypto_op_mp == NULL) {
		SPDK_ERRLOG("Cannot create op pool\n");
		rc = -ENOMEM;
		goto error_create_op;
	}

	/* Init all devices */
	for (i = 0; i < cdev_count; i++) {
		rc = create_vbdev_dev(i, num_lcores);
		if (rc) {
			goto err;
		}
	}

	/* Assign index values to the QAT device qp nodes so that we can
	 * assign them for optimal performance.
	 */
	i = 0;
	TAILQ_FOREACH(dev_qp, &g_device_qp_qat, link) {
		dev_qp->index = i++;
	}

	return 0;

	/* Error cleanup paths. */
err:
	TAILQ_FOREACH_SAFE(device, &g_vbdev_devs, link, tmp_dev) {
		TAILQ_REMOVE(&g_vbdev_devs, device, link);
		free(device);
	}
	rte_mempool_free(g_crypto_op_mp);
	g_crypto_op_mp = NULL;
error_create_op:
	spdk_mempool_free(g_mbuf_mp);
	g_mbuf_mp = NULL;
error_create_mbuf:
	rte_mempool_free(g_session_mp);
	g_session_mp = NULL;
error_create_session_mp:
	if (g_session_mp_priv != NULL) {
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}
	return rc;
}
/* Following an encrypt or decrypt we need to then either write the encrypted data or finish
 * the read on decrypted data. Do that here.
 */
static void
_crypto_operation_complete(struct spdk_bdev_io *bdev_io)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch;
	struct spdk_bdev_io *free_me = io_ctx->read_io;
	int rc = 0;

	TAILQ_REMOVE(&crypto_ch->pending_cry_ios, bdev_io, module_link);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {

		/* Complete the original IO and then free the one that we created
		 * as a result of issuing an IO via submit_request.
		 */
		if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		} else {
			SPDK_ERRLOG("Issue with decryption on bdev_io %p\n", bdev_io);
			rc = -EINVAL;
		}
		spdk_bdev_free_io(free_me);

	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {

		if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) {
			/* Write the encrypted data. */
			rc = spdk_bdev_writev_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
						     &io_ctx->aux_buf_iov, 1, io_ctx->aux_offset_blocks,
						     io_ctx->aux_num_blocks, _complete_internal_write,
						     bdev_io);
		} else {
			SPDK_ERRLOG("Issue with encryption on bdev_io %p\n", bdev_io);
			rc = -EINVAL;
		}

	} else {
		SPDK_ERRLOG("Unknown bdev type %u on crypto operation completion\n",
			    bdev_io->type);
		rc = -EINVAL;
	}

	if (rc) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static int _crypto_operation(struct spdk_bdev_io *bdev_io,
			     enum rte_crypto_cipher_operation crypto_op,
			     void *aux_buf);
/* This is the poller for the crypto device. It uses a single API to dequeue whatever is ready at
 * the device. Then we need to decide if what we've got so far (including previous poller
 * runs) totals up to one or more complete bdev_ios and if so continue with the bdev_io
 * accordingly. This means either completing a read or issuing a new write.
 */
static int
crypto_dev_poller(void *args)
{
	struct crypto_io_channel *crypto_ch = args;
	uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id;
	int i, num_dequeued_ops, num_enqueued_ops;
	struct spdk_bdev_io *bdev_io = NULL;
	struct crypto_bdev_io *io_ctx = NULL;
	struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE];
	struct rte_crypto_op *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE];
	int num_mbufs = 0;
	struct vbdev_crypto_op *op_to_resubmit;

	/* Each run of the poller will get just what the device has available
	 * at the moment we call it, we don't check again after draining the
	 * first batch.
	 */
	num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, crypto_ch->device_qp->qp,
			   dequeued_ops, MAX_DEQUEUE_BURST_SIZE);

	/* Check if operation was processed successfully */
	for (i = 0; i < num_dequeued_ops; i++) {

		/* We don't know the order or association of the crypto ops wrt any
		 * particular bdev_io so need to look at each and determine if it's
		 * the last one for its bdev_io or not.
		 */
		bdev_io = (struct spdk_bdev_io *)dequeued_ops[i]->sym->m_src->userdata;
		assert(bdev_io != NULL);
		io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;

		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			SPDK_ERRLOG("error with op %d status %u\n", i,
				    dequeued_ops[i]->status);
			/* Update the bdev status to error, we'll still process the
			 * rest of the crypto ops for this bdev_io though so they
			 * aren't left hanging.
			 */
			io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
		}

		assert(io_ctx->cryop_cnt_remaining > 0);

		/* Return the associated src and dst mbufs by collecting them into
		 * an array that we can use the bulk API to free after the loop.
		 */
		dequeued_ops[i]->sym->m_src->userdata = NULL;
		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
		if (dequeued_ops[i]->sym->m_dst) {
			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
		}

		/* done encrypting, complete the bdev_io */
		if (--io_ctx->cryop_cnt_remaining == 0) {

			/* If we're completing this with an outstanding reset we need
			 * to fail it.
			 */
			if (crypto_ch->iter) {
				io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
			}

			/* Complete the IO */
			_crypto_operation_complete(bdev_io);
		}
	}

	/* Now bulk free both mbufs and crypto operations. */
	if (num_dequeued_ops > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp,
				     (void **)dequeued_ops,
				     num_dequeued_ops);
		assert(num_mbufs > 0);
		spdk_mempool_put_bulk(g_mbuf_mp,
				      (void **)mbufs_to_free,
				      num_mbufs);
	}

	/* Check if there are any pending crypto ops to process */
	while (!TAILQ_EMPTY(&crypto_ch->queued_cry_ops)) {
		op_to_resubmit = TAILQ_FIRST(&crypto_ch->queued_cry_ops);
		io_ctx = (struct crypto_bdev_io *)op_to_resubmit->bdev_io->driver_ctx;
		num_enqueued_ops = rte_cryptodev_enqueue_burst(op_to_resubmit->cdev_id,
				   op_to_resubmit->qp,
				   &op_to_resubmit->crypto_op,
				   1);
		if (num_enqueued_ops == 1) {
			/* Make sure we don't put this on twice as one bdev_io is made up
			 * of many crypto ops.
			 */
			if (io_ctx->on_pending_list == false) {
				TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, op_to_resubmit->bdev_io, module_link);
				io_ctx->on_pending_list = true;
			}
			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link);
		} else {
			/* if we couldn't get one, just break and try again later. */
			break;
		}
	}

	/* If the channel iter is not NULL, we need to continue to poll
	 * until the pending list is empty, then we can move on to the
	 * next channel.
	 */
	if (crypto_ch->iter && TAILQ_EMPTY(&crypto_ch->pending_cry_ios)) {
		SPDK_NOTICELOG("Channel %p has been quiesced.\n", crypto_ch);
		spdk_for_each_channel_continue(crypto_ch->iter, 0);
		crypto_ch->iter = NULL;
	}

	return num_dequeued_ops;
}
/* We're either encrypting on the way down or decrypting on the way back. */
static int
_crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation crypto_op,
		  void *aux_buf)
{
	uint16_t num_enqueued_ops = 0;
	uint32_t cryop_cnt = bdev_io->u.bdev.num_blocks;
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch;
	uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id;
	uint32_t crypto_len = io_ctx->crypto_bdev->crypto_bdev.blocklen;
	uint64_t total_length = bdev_io->u.bdev.num_blocks * crypto_len;
	int rc;
	uint32_t iov_index = 0;
	uint32_t allocated = 0;
	uint8_t *current_iov = NULL;
	uint64_t total_remaining = 0;
	uint64_t updated_length, current_iov_remaining = 0;
	uint32_t crypto_index = 0;
	uint32_t en_offset = 0;
	struct rte_crypto_op *crypto_ops[MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *src_mbufs[MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *dst_mbufs[MAX_ENQUEUE_ARRAY_SIZE];
	int burst;
	struct vbdev_crypto_op *op_to_queue;
	uint64_t alignment = spdk_bdev_get_buf_align(&io_ctx->crypto_bdev->crypto_bdev);

	assert((bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen) <= CRYPTO_MAX_IO);

	/* Get the number of source mbufs that we need. These will always be 1:1 because we
	 * don't support chaining. The reason we don't is because of our decision to use
	 * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the
	 * op would be > 1 LBA.
	 */
	rc = spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], cryop_cnt);
	if (rc) {
		SPDK_ERRLOG("ERROR trying to get src_mbufs!\n");
		return -ENOMEM;
	}

	/* Get the same amount but these buffers to describe the encrypted data location (dst). */
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		rc = spdk_mempool_get_bulk(g_mbuf_mp, (void **)&dst_mbufs[0], cryop_cnt);
		if (rc) {
			SPDK_ERRLOG("ERROR trying to get dst_mbufs!\n");
			rc = -ENOMEM;
			goto error_get_dst;
		}
	}

#ifdef __clang_analyzer__
	/* silence scan-build false positive */
	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, MAX_ENQUEUE_ARRAY_SIZE, 0x1000);
#endif
	/* Allocate crypto operations. */
	allocated = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
					     RTE_CRYPTO_OP_TYPE_SYMMETRIC,
					     crypto_ops, cryop_cnt);
	if (allocated < cryop_cnt) {
		SPDK_ERRLOG("ERROR trying to get crypto ops!\n");
		rc = -ENOMEM;
		goto error_get_ops;
	}

	/* For encryption, we need to prepare a single contiguous buffer as the encryption
	 * destination, we'll then pass that along for the write after encryption is done.
	 * This is done to avoid encrypting the provided write buffer which may be
	 * undesirable in some use cases.
	 */
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		io_ctx->aux_buf_iov.iov_len = total_length;
		io_ctx->aux_buf_raw = aux_buf;
		io_ctx->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~(alignment - 1));
		io_ctx->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
		io_ctx->aux_num_blocks = bdev_io->u.bdev.num_blocks;
	}

	/* This value is used in the completion callback to determine when the bdev_io is
	 * complete.
	 */
	io_ctx->cryop_cnt_remaining = cryop_cnt;
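	/* A worked example of the round-up above, with assumed numbers: if the
	 * raw aux_buf is 0x200f234 and alignment is 0x1000 (4 KiB), then
	 * (0x200f234 + 0xfff) & ~0xfff = 0x2010000, the next 4 KiB boundary.
	 * The raw pointer is kept in aux_buf_raw so it can be handed back to
	 * the bdev layer later via spdk_bdev_io_put_aux_buf().
	 */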
	/* As we don't support chaining because of a decision to use LBA as IV, construction
	 * of crypto operations is straightforward. We build both the op, the mbuf and the
	 * dst_mbuf in our local arrays by looping through the length of the bdev IO and
	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
	 * mbuf per crypto operation.
	 */
	total_remaining = total_length;
	current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base;
	current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len;
	do {
		uint8_t *iv_ptr;
		uint64_t op_block_offset;

		/* Set the mbuf elements address and length. Null out the next pointer. */
		src_mbufs[crypto_index]->buf_addr = current_iov;
		src_mbufs[crypto_index]->data_len = updated_length = crypto_len;
		/* TODO: Make this assignment conditional on QAT usage and add an assert. */
		src_mbufs[crypto_index]->buf_iova = spdk_vtophys((void *)current_iov, &updated_length);
		src_mbufs[crypto_index]->next = NULL;
		/* Store context in every mbuf as we don't know anything about completion order */
		src_mbufs[crypto_index]->userdata = bdev_io;

		/* Set the IV - we use the LBA of the crypto_op */
		iv_ptr = rte_crypto_op_ctod_offset(crypto_ops[crypto_index], uint8_t *,
						   IV_OFFSET);
		memset(iv_ptr, 0, AES_CBC_IV_LENGTH);
		op_block_offset = bdev_io->u.bdev.offset_blocks + crypto_index;
		rte_memcpy(iv_ptr, &op_block_offset, sizeof(uint64_t));

		/* Set the data to encrypt/decrypt length */
		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;

		/* link the mbuf to the crypto op. */
		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
		if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			crypto_ops[crypto_index]->sym->m_dst = src_mbufs[crypto_index];
		} else {
			crypto_ops[crypto_index]->sym->m_dst = NULL;
		}

		/* For encrypt, point the destination to a buffer we allocate and redirect the bdev_io
		 * that will be used to process the write on completion to the same buffer. Setting
		 * up the en_buffer is a little simpler as we know the destination buffer is single IOV.
		 */
		if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {

			/* Set the relevant destination en_mbuf elements. */
			dst_mbufs[crypto_index]->buf_addr = io_ctx->aux_buf_iov.iov_base + en_offset;
			dst_mbufs[crypto_index]->data_len = updated_length = crypto_len;
			/* TODO: Make this assignment conditional on QAT usage and add an assert. */
			dst_mbufs[crypto_index]->buf_iova = spdk_vtophys(dst_mbufs[crypto_index]->buf_addr,
					      &updated_length);
			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
			en_offset += crypto_len;
			dst_mbufs[crypto_index]->next = NULL;

			/* Attach the crypto session to the operation */
			rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
							      io_ctx->crypto_bdev->session_encrypt);
			if (rc) {
				rc = -EINVAL;
				goto error_attach_session;
			}

		} else {
			/* Attach the crypto session to the operation */
			rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
							      io_ctx->crypto_bdev->session_decrypt);
			if (rc) {
				rc = -EINVAL;
				goto error_attach_session;
			}
		}

		/* Subtract our running totals for the op in progress and the overall bdev io */
		total_remaining -= crypto_len;
		current_iov_remaining -= crypto_len;

		/* move our current IOV pointer accordingly. */
		current_iov += crypto_len;

		/* move on to the next crypto operation */
		crypto_index++;

		/* If we're done with this IOV, move to the next one. */
		if (current_iov_remaining == 0 && total_remaining > 0) {
			iov_index++;
			current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base;
			current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len;
		}
	} while (total_remaining > 0);
	/* Enqueue everything we've got but limit by the max number of descriptors we
	 * configured the crypto device for.
	 */
	burst = spdk_min(cryop_cnt, CRYPTO_QP_DESCRIPTORS);
	num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, crypto_ch->device_qp->qp,
			   &crypto_ops[0],
			   burst);

	/* Add this bdev_io to our outstanding list if any of its crypto ops made it. */
	if (num_enqueued_ops > 0) {
		TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, bdev_io, module_link);
		io_ctx->on_pending_list = true;
	}
	/* We were unable to enqueue everything but did get some, so need to decide what
	 * to do based on the status of the last op.
	 */
	if (num_enqueued_ops < cryop_cnt) {
		switch (crypto_ops[num_enqueued_ops]->status) {
		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
			/* Queue them up on a linked list to be resubmitted via the poller. */
			for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) {
				op_to_queue = (struct vbdev_crypto_op *)rte_crypto_op_ctod_offset(crypto_ops[crypto_index],
						uint8_t *, QUEUED_OP_OFFSET);
				op_to_queue->cdev_id = cdev_id;
				op_to_queue->qp = crypto_ch->device_qp->qp;
				op_to_queue->crypto_op = crypto_ops[crypto_index];
				op_to_queue->bdev_io = bdev_io;
				TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops,
						  op_to_queue,
						  link);
			}
			break;
		default:
			/* For all other statuses, set the io_ctx bdev_io status so that
			 * the poller will pick the failure up for the overall bdev status.
			 */
			io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
			if (num_enqueued_ops == 0) {
				/* If nothing was enqueued, but the last one wasn't because of
				 * busy, fail it now as the poller won't know anything about it.
				 */
				_crypto_operation_complete(bdev_io);
				rc = -EINVAL;
				goto error_attach_session;
			}
			break;
		}
	}

	return rc;

	/* Error cleanup paths. */
error_attach_session:
error_get_ops:
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		spdk_mempool_put_bulk(g_mbuf_mp, (void **)&dst_mbufs[0],
				      cryop_cnt);
	}
	if (allocated > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops,
				     allocated);
	}
error_get_dst:
	spdk_mempool_put_bulk(g_mbuf_mp, (void **)&src_mbufs[0],
			      cryop_cnt);
	return rc;
}
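/* For orientation, a sketch of the reset/quiesce sequence implemented by the
 * callbacks below together with crypto_dev_poller() (summarized from this
 * file, not additional behavior):
 *
 *	vbdev_crypto_submit_request(RESET) -> spdk_bdev_reset(base bdev)
 *	_complete_internal_io(RESET)       -> spdk_for_each_channel(_ch_quiesce)
 *	_ch_quiesce()                      -> sets crypto_ch->iter
 *	crypto_dev_poller()                -> drains pending_cry_ios, then calls
 *	                                      spdk_for_each_channel_continue()
 *	_ch_quiesce_done()                 -> completes the original reset IO
 */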
/* This function is called after all channels have been quiesced following
 * a bdev reset.
 */
static void
_ch_quiesce_done(struct spdk_io_channel_iter *i, int status)
{
	struct crypto_bdev_io *io_ctx = spdk_io_channel_iter_get_ctx(i);

	assert(TAILQ_EMPTY(&io_ctx->crypto_ch->pending_cry_ios));
	assert(io_ctx->orig_io != NULL);

	spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_SUCCESS);
}

/* This function is called per channel to quiesce IOs before completing a
 * bdev reset that we received.
 */
static void
_ch_quiesce(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);

	crypto_ch->iter = i;
	/* When the poller runs, it will see the non-NULL iter and handle
	 * the quiesce.
	 */
}

/* Completion callback for IOs that were issued from this bdev other than read/write.
 * They have their own for readability.
 */
static void
_complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

		assert(orig_io == orig_ctx->orig_io);

		spdk_bdev_free_io(bdev_io);

		spdk_for_each_channel(orig_ctx->crypto_bdev,
				      _ch_quiesce,
				      orig_ctx,
				      _ch_quiesce_done);
		return;
	}

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for writes that were issued from this bdev. */
static void
_complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

	spdk_bdev_io_put_aux_buf(orig_io, orig_ctx->aux_buf_raw);

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}
/* Completion callback for reads that were issued from this bdev. */
static void
_complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

	if (success) {

		/* Save off this bdev_io so it can be freed after decryption. */
		orig_ctx->read_io = bdev_io;

		if (!_crypto_operation(orig_io, RTE_CRYPTO_CIPHER_OP_DECRYPT, NULL)) {
			return;
		} else {
			SPDK_ERRLOG("ERROR decrypting\n");
		}
	} else {
		SPDK_ERRLOG("ERROR on read prior to decrypting\n");
	}

	spdk_bdev_io_complete(orig_io, SPDK_BDEV_IO_STATUS_FAILED);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_crypto_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;

	vbdev_crypto_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, io_ctx->crypto_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it.
 */
static void
crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	rc = spdk_bdev_readv_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, bdev_io->u.bdev.iovs,
				    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
				    bdev_io->u.bdev.num_blocks, _complete_internal_read,
				    bdev_io);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
/* For encryption we don't want to encrypt the data in place as the host isn't
 * expecting us to mangle its data buffers so we need to encrypt into the bdev
 * aux buffer, then we can use that as the source for the disk data transfer.
 */
static void
crypto_write_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			void *aux_buf)
{
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	rc = _crypto_operation(bdev_io, RTE_CRYPTO_CIPHER_OP_ENCRYPT, aux_buf);
	if (rc != 0) {
		spdk_bdev_io_put_aux_buf(bdev_io, aux_buf);
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
/* Called when someone submits IO to this crypto vbdev. For IOs not relevant to crypto,
 * we're simply passing it on here via SPDK IO calls which in turn allocate another bdev IO
 * and call our cpl callback provided below along with the original bdev_io so that we can
 * complete it once this IO completes. For crypto operations, we'll either encrypt it first
 * (writes) then call back into bdev to submit it or we'll submit a read and then catch it
 * on the way back for decryption.
 */
static void
vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	memset(io_ctx, 0, sizeof(struct crypto_bdev_io));
	io_ctx->crypto_bdev = crypto_bdev;
	io_ctx->crypto_ch = crypto_ch;
	io_ctx->orig_io = bdev_io;
	io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		/* Tell the bdev layer that we need an aux buf in addition to the data
		 * buf already associated with the bdev.
		 */
		spdk_bdev_io_get_aux_buf(bdev_io, crypto_write_get_buf_cb);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch,
				     _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer except for WZ command which
 * we always say we don't support so that the bdev layer will actually send us
 * real writes that we can encrypt.
 */
static bool
vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_FLUSH:
		return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type);
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* Force the bdev layer to issue actual writes of zeroes so we can
		 * encrypt them as regular writes.
		 */
	default:
		return false;
	}
}

/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_crypto *crypto_bdev = io_device;

	/* Done with this crypto_bdev. */
	rte_cryptodev_sym_session_free(crypto_bdev->session_decrypt);
	rte_cryptodev_sym_session_free(crypto_bdev->session_encrypt);
	free(crypto_bdev->drv_name);
	if (crypto_bdev->key) {
		memset(crypto_bdev->key, 0, strnlen(crypto_bdev->key, (AES_CBC_KEY_LENGTH + 1)));
		free(crypto_bdev->key);
	}
	if (crypto_bdev->key2) {
		memset(crypto_bdev->key2, 0, strnlen(crypto_bdev->key2, (AES_XTS_KEY_LENGTH + 1)));
		free(crypto_bdev->key2);
	}
	if (crypto_bdev->xts_key) {
		memset(crypto_bdev->xts_key, 0, strnlen(crypto_bdev->xts_key, (AES_XTS_KEY_LENGTH * 2) + 1));
		free(crypto_bdev->xts_key);
	}
	free(crypto_bdev->crypto_bdev.name);
	free(crypto_bdev);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_crypto_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}
/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_crypto_destruct(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* Remove this device from the internal list */
	TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(crypto_bdev->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) {
		spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc);
	} else {
		spdk_bdev_close(crypto_bdev->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(crypto_bdev, _device_unregister_cb);

	g_number_of_claimed_volumes--;

	return 0;
}

/* We supplied this as an entry point for upper layers who want to communicate to this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our crypto vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our crypto node.
 */
static struct spdk_io_channel *
vbdev_crypto_get_io_channel(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our crypto_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	return spdk_get_io_channel(crypto_bdev);
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	spdk_json_write_name(w, "crypto");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
	spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->drv_name);
	spdk_json_write_named_string(w, "key", crypto_bdev->key);
	if (strcmp(crypto_bdev->cipher, AES_XTS) == 0) {
		spdk_json_write_named_string(w, "key2", crypto_bdev->key2);
	}
	spdk_json_write_named_string(w, "cipher", crypto_bdev->cipher);
	spdk_json_write_object_end(w);
	return 0;
}
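/* For reference, the object emitted above looks roughly like the following
 * (all values are made up; the PMD string is whatever drv_name holds):
 *
 *	"crypto": {
 *		"base_bdev_name": "Nvme0n1",
 *		"name": "CryNvme0",
 *		"crypto_pmd": "crypto_aesni_mb",
 *		"key": "0123456789123456",
 *		"cipher": "AES_CBC"
 *	}
 */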
static int
vbdev_crypto_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev;

	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_crypto_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
		spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->drv_name);
		spdk_json_write_named_string(w, "key", crypto_bdev->key);
		if (strcmp(crypto_bdev->cipher, AES_XTS) == 0) {
			spdk_json_write_named_string(w, "key2", crypto_bdev->key2);
		}
		spdk_json_write_named_string(w, "cipher", crypto_bdev->cipher);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}

/* Helper function for the channel creation callback. */
static void
_assign_device_qp(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
		  struct crypto_io_channel *crypto_ch)
{
	pthread_mutex_lock(&g_device_qp_lock);
	if (strcmp(crypto_bdev->drv_name, QAT) == 0) {
		/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
		 * workload out over the multiple virtual functions in the device. For the devices
		 * where this isn't the case, it doesn't hurt.
		 */
		TAILQ_FOREACH(device_qp, &g_device_qp_qat, link) {
			if (device_qp->index != g_next_qat_index) {
				continue;
			}
			if (device_qp->in_use == false) {
				crypto_ch->device_qp = device_qp;
				device_qp->in_use = true;
				g_next_qat_index = (g_next_qat_index + QAT_VF_SPREAD) % g_qat_total_qp;
				break;
			} else {
				/* if the preferred index is used, skip to the next one in this set. */
				g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
			}
		}
	} else if (strcmp(crypto_bdev->drv_name, AESNI_MB) == 0) {
		TAILQ_FOREACH(device_qp, &g_device_qp_aesni_mb, link) {
			if (device_qp->in_use == false) {
				crypto_ch->device_qp = device_qp;
				device_qp->in_use = true;
				break;
			}
		}
	}
	pthread_mutex_unlock(&g_device_qp_lock);
}
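/* A worked example of the QAT spread above, with assumed numbers: two QAT VFs
 * of 32 qpairs each give g_qat_total_qp = 64, and with QAT_VF_SPREAD = 32
 * successive channels are handed qp indices 0, 32, 1, 33, 2, 34, ... so load
 * alternates between the two VFs instead of filling one VF first.
 */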
/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. We also register the
 * poller used to complete crypto operations from the device.
 */
static int
crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;
	struct vbdev_crypto *crypto_bdev = io_device;
	struct device_qp *device_qp = NULL;

	crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
	crypto_ch->poller = SPDK_POLLER_REGISTER(crypto_dev_poller, crypto_ch, 0);
	crypto_ch->device_qp = NULL;

	/* Assign a device/qp combination that is unique per channel per PMD. */
	_assign_device_qp(crypto_bdev, device_qp, crypto_ch);
	assert(crypto_ch->device_qp);

	/* We use this queue to track outstanding IO in our layer. */
	TAILQ_INIT(&crypto_ch->pending_cry_ios);

	/* We use this to queue up crypto ops when the device is busy. */
	TAILQ_INIT(&crypto_ch->queued_cry_ops);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created.
 */
static void
crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;

	pthread_mutex_lock(&g_device_qp_lock);
	crypto_ch->device_qp->in_use = false;
	pthread_mutex_unlock(&g_device_qp_lock);

	spdk_poller_unregister(&crypto_ch->poller);
	spdk_put_io_channel(crypto_ch->base_ch);
}

/* Create the association from the bdev and vbdev name and insert
 * on the global list.
 */
static int
vbdev_crypto_insert_name(const char *bdev_name, const char *vbdev_name,
			 const char *crypto_pmd, const char *key,
			 const char *cipher, const char *key2)
{
	struct bdev_names *name;
	int rc, j;
	bool found = false;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("crypto bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		rc = -ENOMEM;
		goto error_alloc_bname;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		rc = -ENOMEM;
		goto error_alloc_vname;
	}

	name->drv_name = strdup(crypto_pmd);
	if (!name->drv_name) {
		SPDK_ERRLOG("could not allocate name->drv_name\n");
		rc = -ENOMEM;
		goto error_alloc_dname;
	}
	for (j = 0; j < MAX_NUM_DRV_TYPES; j++) {
		if (strcmp(crypto_pmd, g_driver_names[j]) == 0) {
			found = true;
			break;
		}
	}
	if (!found) {
		SPDK_ERRLOG("invalid crypto PMD type %s\n", crypto_pmd);
		rc = -EINVAL;
		goto error_invalid_pmd;
	}

	name->key = strdup(key);
	if (!name->key) {
		SPDK_ERRLOG("could not allocate name->key\n");
		rc = -ENOMEM;
		goto error_alloc_key;
	}
	if (strnlen(name->key, (AES_CBC_KEY_LENGTH + 1)) != AES_CBC_KEY_LENGTH) {
		SPDK_ERRLOG("invalid AES_CBC key length\n");
		rc = -EINVAL;
		goto error_invalid_key;
	}

	if (strncmp(cipher, AES_XTS, sizeof(AES_XTS)) == 0) {
		/* To please scan-build, input validation makes sure we can't
		 * have this cipher without providing a key2.
		 */
		name->cipher = AES_XTS;
		assert(key2);
		if (strnlen(key2, (AES_XTS_KEY_LENGTH + 1)) != AES_XTS_KEY_LENGTH) {
			SPDK_ERRLOG("invalid AES_XTS key length\n");
			rc = -EINVAL;
			goto error_invalid_key2;
		}

		name->key2 = strdup(key2);
		if (!name->key2) {
			SPDK_ERRLOG("could not allocate name->key2\n");
			rc = -ENOMEM;
			goto error_alloc_key2;
		}
	} else if (strncmp(cipher, AES_CBC, sizeof(AES_CBC)) == 0) {
		name->cipher = AES_CBC;
	} else {
		SPDK_ERRLOG("Invalid cipher: %s\n", cipher);
		rc = -EINVAL;
		goto error_cipher;
	}

	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;

	/* Error cleanup paths. */
error_cipher:
	free(name->key2);
error_alloc_key2:
error_invalid_key2:
error_invalid_key:
	free(name->key);
error_alloc_key:
error_invalid_pmd:
	free(name->drv_name);
error_alloc_dname:
	free(name->vbdev_name);
error_alloc_vname:
	free(name->bdev_name);
error_alloc_bname:
	free(name);
	return rc;
}
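/* An illustrative RPC invocation that lands here via create_crypto_disk()
 * below (bdev and key values are made up; the 16-character key matches
 * AES_CBC_KEY_LENGTH):
 *
 *	scripts/rpc.py bdev_crypto_create Nvme0n1 CryNvme0 crypto_aesni_mb \
 *		0123456789123456
 */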
/* RPC entry point for crypto creation. */
int
create_crypto_disk(const char *bdev_name, const char *vbdev_name,
		   const char *crypto_pmd, const char *key,
		   const char *cipher, const char *key2)
{
	int rc;

	rc = vbdev_crypto_insert_name(bdev_name, vbdev_name, crypto_pmd, key, cipher, key2);
	if (rc) {
		return rc;
	}

	rc = vbdev_crypto_claim(bdev_name);
	if (rc == -ENODEV) {
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}

/* Called at driver init time, parses config file to prepare for examine calls,
 * also fully initializes the crypto drivers.
 */
static int
vbdev_crypto_init(void)
{
	int rc = 0;

	/* Fully configure both SW and HW drivers. */
	rc = vbdev_crypto_init_crypto_drivers();
	if (rc) {
		SPDK_ERRLOG("Error setting up crypto devices\n");
	}

	return rc;
}

/* Called when the entire module is being torn down. */
static void
vbdev_crypto_finish(void)
{
	struct bdev_names *name;
	struct vbdev_dev *device;
	struct device_qp *dev_qp;
	unsigned i;
	int rc;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->drv_name);
		free(name->key);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name->key2);
		free(name);
	}

	while ((device = TAILQ_FIRST(&g_vbdev_devs))) {
		struct rte_cryptodev *rte_dev;

		TAILQ_REMOVE(&g_vbdev_devs, device, link);
		rte_cryptodev_stop(device->cdev_id);

		assert(device->cdev_id < RTE_CRYPTO_MAX_DEVS);
		rte_dev = &rte_cryptodevs[device->cdev_id];

		if (rte_dev->dev_ops->queue_pair_release != NULL) {
			for (i = 0; i < device->cdev_info.max_nb_queue_pairs; i++) {
				rte_dev->dev_ops->queue_pair_release(rte_dev, i);
			}
		}
		free(device);
	}
	rc = rte_vdev_uninit(AESNI_MB);
	if (rc) {
		SPDK_ERRLOG("%d from rte_vdev_uninit\n", rc);
	}

	while ((dev_qp = TAILQ_FIRST(&g_device_qp_qat))) {
		TAILQ_REMOVE(&g_device_qp_qat, dev_qp, link);
		free(dev_qp);
	}

	while ((dev_qp = TAILQ_FIRST(&g_device_qp_aesni_mb))) {
		TAILQ_REMOVE(&g_device_qp_aesni_mb, dev_qp, link);
		free(dev_qp);
	}

	rte_mempool_free(g_crypto_op_mp);
	spdk_mempool_free(g_mbuf_mp);
	rte_mempool_free(g_session_mp);
	if (g_session_mp_priv != NULL) {
		rte_mempool_free(g_session_mp_priv);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_crypto_get_ctx_size(void)
{
	return sizeof(struct crypto_bdev_io);
}

static void
vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_crypto *crypto_bdev, *tmp;

	TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) {
		if (bdev_find == crypto_bdev->base_bdev) {
			spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL);
		}
	}
}
/* Called when the underlying base bdev triggers an asynchronous event,
 * such as bdev removal.
 */
static void
vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_crypto_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

static void
vbdev_crypto_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = {
	.destruct = vbdev_crypto_destruct,
	.submit_request = vbdev_crypto_submit_request,
	.io_type_supported = vbdev_crypto_io_type_supported,
	.get_io_channel = vbdev_crypto_get_io_channel,
	.dump_info_json = vbdev_crypto_dump_info_json,
	.write_config_json = vbdev_crypto_write_config_json
};

static struct spdk_bdev_module crypto_if = {
	.name = "crypto",
	.module_init = vbdev_crypto_init,
	.get_ctx_size = vbdev_crypto_get_ctx_size,
	.examine_config = vbdev_crypto_examine,
	.module_fini = vbdev_crypto_finish,
	.config_json = vbdev_crypto_config_json
};

SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if)

static int
vbdev_crypto_claim(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_crypto *vbdev;
	struct vbdev_dev *device;
	struct spdk_bdev *bdev;
	bool found = false;
	int rc = 0;

	if (g_number_of_claimed_volumes >= MAX_CRYPTO_VOLUMES) {
		SPDK_DEBUGLOG(vbdev_crypto, "Reached max number of claimed volumes\n");
		/* Return directly; nothing has been allocated or counted yet,
		 * so the shared error path (which decrements the counter)
		 * must not be taken here.
		 */
		return -EINVAL;
	}
	g_number_of_claimed_volumes++;
	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the crypto_bdev & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name);

		vbdev = calloc(1, sizeof(struct vbdev_crypto));
		if (!vbdev) {
			SPDK_ERRLOG("could not allocate crypto_bdev\n");
			rc = -ENOMEM;
			goto error_vbdev_alloc;
		}

		vbdev->crypto_bdev.name = strdup(name->vbdev_name);
		if (!vbdev->crypto_bdev.name) {
			SPDK_ERRLOG("could not allocate crypto_bdev name\n");
			rc = -ENOMEM;
			goto error_bdev_name;
		}

		vbdev->key = strdup(name->key);
		if (!vbdev->key) {
			SPDK_ERRLOG("could not allocate crypto_bdev key\n");
			rc = -ENOMEM;
			goto error_alloc_key;
		}

		if (name->key2) {
			vbdev->key2 = strdup(name->key2);
			if (!vbdev->key2) {
				SPDK_ERRLOG("could not allocate crypto_bdev key2\n");
				rc = -ENOMEM;
				goto error_alloc_key2;
			}
		}

		vbdev->drv_name = strdup(name->drv_name);
		if (!vbdev->drv_name) {
			SPDK_ERRLOG("could not allocate crypto_bdev drv_name\n");
			rc = -ENOMEM;
			goto error_drv_name;
		}

		vbdev->crypto_bdev.product_name = "crypto";

		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb,
					NULL, &vbdev->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			goto error_open;
		}

		bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc);
		vbdev->base_bdev = bdev;

		vbdev->crypto_bdev.write_cache = bdev->write_cache;
		vbdev->cipher = AES_CBC;
		if (strcmp(vbdev->drv_name, QAT) == 0) {
			vbdev->crypto_bdev.required_alignment =
				spdk_max(spdk_u32log2(bdev->blocklen), bdev->required_alignment);
			SPDK_NOTICELOG("QAT in use: Required alignment set to %u\n",
				       vbdev->crypto_bdev.required_alignment);
			if (strcmp(name->cipher, AES_CBC) == 0) {
				SPDK_NOTICELOG("QAT using cipher: AES_CBC\n");
			} else {
				SPDK_NOTICELOG("QAT using cipher: AES_XTS\n");
				vbdev->cipher = AES_XTS;
				/* DPDK expects the keys to be concatenated together. */
				vbdev->xts_key = calloc(1, (AES_XTS_KEY_LENGTH * 2) + 1);
				if (vbdev->xts_key == NULL) {
					SPDK_ERRLOG("could not allocate memory for XTS key\n");
					rc = -ENOMEM;
					goto error_xts_key;
				}
				memcpy(vbdev->xts_key, vbdev->key, AES_XTS_KEY_LENGTH);
				assert(name->key2);
				/* Copy key2 plus its NUL terminator so xts_key stays
				 * NUL-terminated; both fit in the buffer sized above.
				 */
				memcpy(vbdev->xts_key + AES_XTS_KEY_LENGTH, name->key2,
				       AES_XTS_KEY_LENGTH + 1);
			}
		} else {
			vbdev->crypto_bdev.required_alignment = bdev->required_alignment;
		}
		/* Note: CRYPTO_MAX_IO is in units of bytes, optimal_io_boundary is
		 * in units of blocks.
		 */
		if (bdev->optimal_io_boundary > 0) {
			vbdev->crypto_bdev.optimal_io_boundary =
				spdk_min((CRYPTO_MAX_IO / bdev->blocklen), bdev->optimal_io_boundary);
		} else {
			vbdev->crypto_bdev.optimal_io_boundary = (CRYPTO_MAX_IO / bdev->blocklen);
		}
		vbdev->crypto_bdev.split_on_optimal_io_boundary = true;
		vbdev->crypto_bdev.blocklen = bdev->blocklen;
		vbdev->crypto_bdev.blockcnt = bdev->blockcnt;
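
		/* Worked example (illustrative): with a 512-byte blocklen, the
		 * boundary computed above is CRYPTO_MAX_IO / 512 = 128 blocks,
		 * so the bdev layer splits any IO crossing a 128-block boundary
		 * before it reaches us, keeping each request within CRYPTO_MAX_IO.
		 */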
1804 */ 1805 vbdev->crypto_bdev.ctxt = vbdev; 1806 vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table; 1807 vbdev->crypto_bdev.module = &crypto_if; 1808 TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link); 1809 1810 spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb, 1811 sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name); 1812 1813 /* Save the thread where the base device is opened */ 1814 vbdev->thread = spdk_get_thread(); 1815 1816 rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module); 1817 if (rc) { 1818 SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(bdev)); 1819 goto error_claim; 1820 } 1821 1822 /* To init the session we have to get the cryptoDev device ID for this vbdev */ 1823 TAILQ_FOREACH(device, &g_vbdev_devs, link) { 1824 if (strcmp(device->cdev_info.driver_name, vbdev->drv_name) == 0) { 1825 found = true; 1826 break; 1827 } 1828 } 1829 if (found == false) { 1830 SPDK_ERRLOG("ERROR can't match crypto device driver to crypto vbdev!\n"); 1831 rc = -EINVAL; 1832 goto error_cant_find_devid; 1833 } 1834 1835 /* Get sessions. */ 1836 vbdev->session_encrypt = rte_cryptodev_sym_session_create(g_session_mp); 1837 if (NULL == vbdev->session_encrypt) { 1838 SPDK_ERRLOG("ERROR trying to create crypto session!\n"); 1839 rc = -EINVAL; 1840 goto error_session_en_create; 1841 } 1842 1843 vbdev->session_decrypt = rte_cryptodev_sym_session_create(g_session_mp); 1844 if (NULL == vbdev->session_decrypt) { 1845 SPDK_ERRLOG("ERROR trying to create crypto session!\n"); 1846 rc = -EINVAL; 1847 goto error_session_de_create; 1848 } 1849 1850 /* Init our per vbdev xform with the desired cipher options. */ 1851 vbdev->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1852 vbdev->cipher_xform.cipher.iv.offset = IV_OFFSET; 1853 if (strcmp(name->cipher, AES_CBC) == 0) { 1854 vbdev->cipher_xform.cipher.key.data = vbdev->key; 1855 vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; 1856 vbdev->cipher_xform.cipher.key.length = AES_CBC_KEY_LENGTH; 1857 } else { 1858 vbdev->cipher_xform.cipher.key.data = vbdev->xts_key; 1859 vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS; 1860 vbdev->cipher_xform.cipher.key.length = AES_XTS_KEY_LENGTH * 2; 1861 } 1862 vbdev->cipher_xform.cipher.iv.length = AES_CBC_IV_LENGTH; 1863 1864 vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 1865 rc = rte_cryptodev_sym_session_init(device->cdev_id, vbdev->session_encrypt, 1866 &vbdev->cipher_xform, 1867 g_session_mp_priv ? g_session_mp_priv : g_session_mp); 1868 if (rc < 0) { 1869 SPDK_ERRLOG("ERROR trying to init encrypt session!\n"); 1870 rc = -EINVAL; 1871 goto error_session_init; 1872 } 1873 1874 vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; 1875 rc = rte_cryptodev_sym_session_init(device->cdev_id, vbdev->session_decrypt, 1876 &vbdev->cipher_xform, 1877 g_session_mp_priv ? g_session_mp_priv : g_session_mp); 1878 if (rc < 0) { 1879 SPDK_ERRLOG("ERROR trying to init decrypt session!\n"); 1880 rc = -EINVAL; 1881 goto error_session_init; 1882 } 1883 1884 rc = spdk_bdev_register(&vbdev->crypto_bdev); 1885 if (rc < 0) { 1886 SPDK_ERRLOG("ERROR trying to register bdev\n"); 1887 rc = -EINVAL; 1888 goto error_bdev_register; 1889 } 1890 SPDK_DEBUGLOG(vbdev_crypto, "registered io_device and virtual bdev for: %s\n", 1891 name->vbdev_name); 1892 break; 1893 } 1894 1895 return rc; 1896 1897 /* Error cleanup paths. 
error_bdev_register:
error_session_init:
	rte_cryptodev_sym_session_free(vbdev->session_decrypt);
error_session_de_create:
	rte_cryptodev_sym_session_free(vbdev->session_encrypt);
error_session_en_create:
error_cant_find_devid:
error_claim:
	spdk_bdev_close(vbdev->base_desc);
	TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link);
	spdk_io_device_unregister(vbdev, NULL);
	free(vbdev->xts_key);
error_xts_key:
error_open:
	free(vbdev->drv_name);
error_drv_name:
	free(vbdev->key2);
error_alloc_key2:
	free(vbdev->key);
error_alloc_key:
	free(vbdev->crypto_bdev.name);
error_bdev_name:
	free(vbdev);
error_vbdev_alloc:
	g_number_of_claimed_volumes--;
	return rc;
}

/* RPC entry for deleting a crypto vbdev. */
void
delete_crypto_disk(struct spdk_bdev *bdev, spdk_delete_crypto_complete cb_fn,
		   void *cb_arg)
{
	struct bdev_names *name;

	if (!bdev || bdev->module != &crypto_if) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
	 * vbdev does not get re-created if the same bdev is constructed at some other time,
	 * unless the underlying bdev was hot-removed.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->vbdev_name, bdev->name) == 0) {
			TAILQ_REMOVE(&g_bdev_names, name, link);
			free(name->bdev_name);
			free(name->vbdev_name);
			free(name->drv_name);
			free(name->key);
			free(name->key2);
			free(name);
			break;
		}
	}

	/* Additional cleanup happens in the destruct callback. */
	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
}

/* Because we registered this function as the module's examine_config callback,
 * we'll get a call here any time a new bdev shows up. We parsed the config at
 * init, so we check the new bdev against the list we built up at that time; if
 * the user configured us to attach to this bdev, this is where we do it.
 */
static void
vbdev_crypto_examine(struct spdk_bdev *bdev)
{
	vbdev_crypto_claim(spdk_bdev_get_name(bdev));
	spdk_bdev_module_examine_done(&crypto_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto)
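
/* Debug-level messages in this file go through the "vbdev_crypto" component
 * registered above. Sketch of enabling it (assuming a debug build and the
 * standard SPDK application framework's log flag option):
 *
 *	./build/bin/spdk_tgt -L vbdev_crypto
 */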