/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "vbdev_crypto.h"

#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"
#include "spdk/log.h"

#include <rte_config.h>
#include <rte_bus_vdev.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf_dyn.h>

/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
	.name = "context_bdev_io",
	.size = sizeof(uint64_t),
	.align = __alignof__(uint64_t),
	.flags = 0,
};
static int g_mbuf_offset;

/* To add support for new device types, follow the examples of the following...
 * Note that the string names are defined by the DPDK PMD in question so be
 * sure to use the exact names.
 */
#define MAX_NUM_DRV_TYPES 2

/* The VF spread is the number of queue pairs between virtual functions, we use this to
 * load balance the QAT device.
 */
#define QAT_VF_SPREAD 32
static uint8_t g_qat_total_qp = 0;
static uint8_t g_next_qat_index;

const char *g_driver_names[MAX_NUM_DRV_TYPES] = { AESNI_MB, QAT };

/* Global list of available crypto devices. */
struct vbdev_dev {
	struct rte_cryptodev_info	cdev_info;	/* includes device friendly name */
	uint8_t				cdev_id;	/* identifier for the device */
	TAILQ_ENTRY(vbdev_dev)		link;
};
static TAILQ_HEAD(, vbdev_dev) g_vbdev_devs = TAILQ_HEAD_INITIALIZER(g_vbdev_devs);

/* Global list and lock for unique device/queue pair combos. We keep 1 list per supported PMD
 * so that we can optimize per PMD where it makes sense. For example, with QAT there is an optimal
 * pattern for assigning queue pairs where with AESNI there is not.
 */
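/* Illustrative sketch of the QAT pattern (values assumed, not taken from a real system): with
 * two QAT VFs that each expose 32 queue pairs, g_qat_total_qp is 64 and _assign_device_qp()
 * hands out qp indexes 0, 32, 1, 33, 2, 34, ... to successive channels, because it advances
 * g_next_qat_index by QAT_VF_SPREAD and falls back to the next free index when the preferred
 * one is already taken. New channels therefore alternate between the two VFs. For AESNI_MB,
 * the first free qp on the list is simply used.
 */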
struct device_qp {
	struct vbdev_dev	*device;	/* ptr to crypto device */
	uint8_t			qp;		/* queue pair for this node */
	bool			in_use;		/* whether this node is in use or not */
	uint8_t			index;		/* used by QAT to load balance placement of qpairs */
	TAILQ_ENTRY(device_qp)	link;
};
static TAILQ_HEAD(, device_qp) g_device_qp_qat = TAILQ_HEAD_INITIALIZER(g_device_qp_qat);
static TAILQ_HEAD(, device_qp) g_device_qp_aesni_mb = TAILQ_HEAD_INITIALIZER(g_device_qp_aesni_mb);
static pthread_mutex_t g_device_qp_lock = PTHREAD_MUTEX_INITIALIZER;


/* In order to limit the number of resources we need to do one crypto
 * operation per LBA (we use LBA as IV), we tell the bdev layer that
 * our max IO size is something reasonable. Units here are in bytes.
 */
#define CRYPTO_MAX_IO		(64 * 1024)

/* This controls how many ops will be dequeued from the crypto driver in one run
 * of the poller. It is mainly a performance knob as it effectively determines how
 * much work the poller has to do. However even that can vary between crypto drivers
 * as the AESNI_MB driver for example does all the crypto work on dequeue whereas the
 * QAT driver just dequeues what has been completed already.
 */
#define MAX_DEQUEUE_BURST_SIZE	64

/* When enqueueing, we need to supply the crypto driver with an array of pointers to
 * operation structs. As each of these can be max 512B, we can adjust the CRYPTO_MAX_IO
 * value in conjunction with the other defines to make sure we're not using crazy amounts
 * of memory. All of these numbers can and probably should be adjusted based on the
 * workload. By default we'll use the worst case (smallest) block size for the
 * minimum number of array entries. As an example, a CRYPTO_MAX_IO size of 64K with 512B
 * blocks would give us an enqueue array size of 128.
 */
#define MAX_ENQUEUE_ARRAY_SIZE (CRYPTO_MAX_IO / 512)

/* The number of MBUFS we need must be a power of two and to support other small IOs
 * in addition to the limits mentioned above, we go to the next power of two. It is a
 * big number because it is one mempool for source and destination mbufs. It may
 * need to be bigger to support multiple crypto drivers at once.
 */
#define NUM_MBUFS		32768
#define POOL_CACHE_SIZE		256
#define MAX_CRYPTO_VOLUMES	128
#define NUM_SESSIONS		(2 * MAX_CRYPTO_VOLUMES)
#define SESS_MEMPOOL_CACHE_SIZE	0
uint8_t g_number_of_claimed_volumes = 0;

/* This is the max number of IOs we can supply to any crypto device QP at one time.
 * It can vary between drivers.
 */
#define CRYPTO_QP_DESCRIPTORS	2048

/* Specific to AES_CBC. */
#define IV_LENGTH		16
#define AES_CBC_KEY_LENGTH	16
#define AES_XTS_KEY_LENGTH	16	/* XTS uses 2 keys, each of this size. */
#define AESNI_MB_NUM_QP		64

/* Common for supported devices. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))
#define QUEUED_OP_OFFSET (IV_OFFSET + IV_LENGTH)
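/* A sketch of the per-op layout implied by the offsets above (field widths are illustrative):
 *
 *   [rte_crypto_op][rte_crypto_sym_op][IV (IV_LENGTH bytes)][struct vbdev_crypto_op]
 *   ^                                 ^                     ^
 *   op start                          IV_OFFSET             QUEUED_OP_OFFSET
 *
 * The private area of IV_LENGTH + QUEUED_OP_LENGTH bytes is reserved when g_crypto_op_mp is
 * created below, so both the IV and the re-queue bookkeeping live inside each crypto op.
 */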
static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void vbdev_crypto_examine(struct spdk_bdev *bdev);
static int vbdev_crypto_claim(const char *bdev_name);
static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);

/* List of crypto_bdev names and their base bdevs via configuration file. */
struct bdev_names {
	char			*vbdev_name;	/* name of the vbdev to create */
	char			*bdev_name;	/* base bdev name */

	/* Note, for dev/test we allow use of key in the config file, for production
	 * use, you must use an RPC to specify the key for security reasons.
	 */
	uint8_t			*key;		/* key per bdev */
	char			*drv_name;	/* name of the crypto device driver */
	char			*cipher;	/* AES_CBC or AES_XTS */
	uint8_t			*key2;		/* key #2 for AES_XTS, per bdev */
	TAILQ_ENTRY(bdev_names)	link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. We keep the device friendly name here even
 * though it's also in the device struct because we use it early on.
 */
struct vbdev_crypto {
	struct spdk_bdev		*base_bdev;		/* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc;		/* its descriptor we get from open */
	struct spdk_bdev		crypto_bdev;		/* the crypto virtual bdev */
	uint8_t				*key;			/* key per bdev */
	uint8_t				*key2;			/* for XTS */
	uint8_t				*xts_key;		/* key + key 2 */
	char				*drv_name;		/* name of the crypto device driver */
	char				*cipher;		/* cipher used */
	uint32_t			qp_desc_nr;		/* number of qp descriptors */
	struct rte_cryptodev_sym_session *session_encrypt;	/* encryption session for this bdev */
	struct rte_cryptodev_sym_session *session_decrypt;	/* decryption session for this bdev */
	struct rte_crypto_sym_xform	cipher_xform;		/* crypto control struct for this bdev */
	TAILQ_ENTRY(vbdev_crypto)	link;
	struct spdk_thread		*thread;		/* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto);

/* Shared mempools between all devices on this system */
static struct rte_mempool *g_session_mp = NULL;
static struct rte_mempool *g_session_mp_priv = NULL;
static struct rte_mempool *g_mbuf_mp = NULL;		/* mbuf mempool */
static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */

static struct rte_mbuf_ext_shared_info g_shinfo = {};	/* used by DPDK mbuf macro */

/* For queueing up crypto operations that we can't submit for some reason */
struct vbdev_crypto_op {
	uint8_t				cdev_id;
	uint8_t				qp;
	struct rte_crypto_op		*crypto_op;
	struct spdk_bdev_io		*bdev_io;
	TAILQ_ENTRY(vbdev_crypto_op)	link;
};
#define QUEUED_OP_LENGTH (sizeof(struct vbdev_crypto_op))

/* The crypto vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * We store things in here that are needed on a per thread basis like the base_channel for this thread,
 * and the poller for this thread.
 */
struct crypto_io_channel {
	struct spdk_io_channel		*base_ch;		/* IO channel of base device */
	struct spdk_poller		*poller;		/* completion poller */
	struct device_qp		*device_qp;		/* unique device/qp combination for this channel */
	TAILQ_HEAD(, spdk_bdev_io)	pending_cry_ios;	/* outstanding operations to the crypto device */
	struct spdk_io_channel_iter	*iter;			/* used with for_each_channel in reset */
	TAILQ_HEAD(, vbdev_crypto_op)	queued_cry_ops;		/* queued for re-submission to CryptoDev */
};

/* This is the crypto per IO context that the bdev layer allocates for us opaquely and attaches to
 * each IO for us.
 */
struct crypto_bdev_io {
	int cryop_cnt_remaining;			/* counter used when completing crypto ops */
	struct crypto_io_channel *crypto_ch;		/* need to store for crypto completion handling */
	struct vbdev_crypto *crypto_bdev;		/* the crypto node struct associated with this IO */
	struct spdk_bdev_io *orig_io;			/* the original IO */
	struct spdk_bdev_io *read_io;			/* the read IO we issued */
	int8_t bdev_io_status;				/* the status we'll report back on the bdev IO */
	bool on_pending_list;
	/* Used for the single contiguous buffer that serves as the crypto destination target for writes */
	uint64_t aux_num_blocks;			/* num of blocks for the contiguous buffer */
	uint64_t aux_offset_blocks;			/* block offset on media */
	void *aux_buf_raw;				/* raw buffer that the bdev layer gave us for write buffer */
	struct iovec aux_buf_iov;			/* iov representing aligned contig write buffer */

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	struct spdk_io_channel *ch;
};

/* Called by vbdev_crypto_init_crypto_drivers() to init each discovered crypto device */
static int
create_vbdev_dev(uint8_t index, uint16_t num_lcores)
{
	struct vbdev_dev *device;
	uint8_t j, cdev_id, cdrv_id;
	struct device_qp *dev_qp;
	struct device_qp *tmp_qp;
	uint32_t qp_desc_nr;
	int rc;
	TAILQ_HEAD(device_qps, device_qp) *dev_qp_head;

	device = calloc(1, sizeof(struct vbdev_dev));
	if (!device) {
		return -ENOMEM;
	}

	/* Get details about this device. */
	rte_cryptodev_info_get(index, &device->cdev_info);
	cdrv_id = device->cdev_info.driver_id;
	cdev_id = device->cdev_id = index;

	/* QAT_ASYM devices are not supported at this time. */
	if (strcmp(device->cdev_info.driver_name, QAT_ASYM) == 0) {
		free(device);
		return 0;
	}

	/* Before going any further, make sure we have enough resources for this
	 * device type to function. We need a unique queue pair per core across each
	 * device type to remain lockless....
	 */
	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
			    device->cdev_info.driver_name);
		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
		rc = -EINVAL;
		goto err;
	}

	/* Setup queue pairs. */
	struct rte_cryptodev_config conf = {
		.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs,
		.socket_id = SPDK_ENV_SOCKET_ID_ANY
	};

	rc = rte_cryptodev_configure(cdev_id, &conf);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to configure cryptodev %u\n", cdev_id);
		rc = -EINVAL;
		goto err;
	}

	/* Select the right device/qp list based on driver name
	 * or error if it does not exist.
302 */ 303 if (strcmp(device->cdev_info.driver_name, QAT) == 0) { 304 dev_qp_head = (struct device_qps *)&g_device_qp_qat; 305 qp_desc_nr = CRYPTO_QP_DESCRIPTORS; 306 } else if (strcmp(device->cdev_info.driver_name, AESNI_MB) == 0) { 307 dev_qp_head = (struct device_qps *)&g_device_qp_aesni_mb; 308 qp_desc_nr = CRYPTO_QP_DESCRIPTORS; 309 } else { 310 SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n", 311 cdev_id, device->cdev_info.driver_name); 312 rc = -EINVAL; 313 goto err_qp_setup; 314 } 315 316 struct rte_cryptodev_qp_conf qp_conf = { 317 .nb_descriptors = qp_desc_nr, 318 .mp_session = g_session_mp, 319 .mp_session_private = g_session_mp_priv, 320 }; 321 322 /* Pre-setup all potential qpairs now and assign them in the channel 323 * callback. If we were to create them there, we'd have to stop the 324 * entire device affecting all other threads that might be using it 325 * even on other queue pairs. 326 */ 327 for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { 328 rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY); 329 if (rc < 0) { 330 SPDK_ERRLOG("Failed to setup queue pair %u on " 331 "cryptodev %u\n", j, cdev_id); 332 rc = -EINVAL; 333 goto err_qp_setup; 334 } 335 } 336 337 rc = rte_cryptodev_start(cdev_id); 338 if (rc < 0) { 339 SPDK_ERRLOG("Failed to start device %u: error %d\n", 340 cdev_id, rc); 341 rc = -EINVAL; 342 goto err_dev_start; 343 } 344 345 /* Build up lists of device/qp combinations per PMD */ 346 for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) { 347 dev_qp = calloc(1, sizeof(struct device_qp)); 348 if (!dev_qp) { 349 rc = -ENOMEM; 350 goto err_qp_alloc; 351 } 352 dev_qp->device = device; 353 dev_qp->qp = j; 354 dev_qp->in_use = false; 355 if (strcmp(device->cdev_info.driver_name, QAT) == 0) { 356 g_qat_total_qp++; 357 } 358 TAILQ_INSERT_TAIL(dev_qp_head, dev_qp, link); 359 } 360 361 /* Add to our list of available crypto devices. */ 362 TAILQ_INSERT_TAIL(&g_vbdev_devs, device, link); 363 364 return 0; 365 err_qp_alloc: 366 TAILQ_FOREACH_SAFE(dev_qp, dev_qp_head, link, tmp_qp) { 367 if (dev_qp->device->cdev_id != device->cdev_id) { 368 continue; 369 } 370 TAILQ_REMOVE(dev_qp_head, dev_qp, link); 371 if (dev_qp_head == (struct device_qps *)&g_device_qp_qat) { 372 g_qat_total_qp--; 373 } 374 free(dev_qp); 375 } 376 rte_cryptodev_stop(cdev_id); 377 err_dev_start: 378 err_qp_setup: 379 rte_cryptodev_close(cdev_id); 380 err: 381 free(device); 382 383 return rc; 384 } 385 386 static void 387 release_vbdev_dev(struct vbdev_dev *device) 388 { 389 struct device_qp *dev_qp; 390 struct device_qp *tmp_qp; 391 TAILQ_HEAD(device_qps, device_qp) *dev_qp_head = NULL; 392 393 assert(device); 394 395 /* Select the right device/qp list based on driver name. */ 396 if (strcmp(device->cdev_info.driver_name, QAT) == 0) { 397 dev_qp_head = (struct device_qps *)&g_device_qp_qat; 398 } else if (strcmp(device->cdev_info.driver_name, AESNI_MB) == 0) { 399 dev_qp_head = (struct device_qps *)&g_device_qp_aesni_mb; 400 } 401 if (dev_qp_head) { 402 TAILQ_FOREACH_SAFE(dev_qp, dev_qp_head, link, tmp_qp) { 403 /* Remove only qps of our device even if the driver names matches. 
			 */
			if (dev_qp->device->cdev_id != device->cdev_id) {
				continue;
			}
			TAILQ_REMOVE(dev_qp_head, dev_qp, link);
			if (dev_qp_head == (struct device_qps *)&g_device_qp_qat) {
				g_qat_total_qp--;
			}
			free(dev_qp);
		}
	}
	rte_cryptodev_stop(device->cdev_id);
	rte_cryptodev_close(device->cdev_id);
	free(device);
}

/* Dummy function used by DPDK to free ext attached buffers to mbufs, we free them ourselves but
 * this callback has to be here. */
static void shinfo_free_cb(void *arg1, void *arg2)
{
}

/* This is called from the module's init function. We setup all crypto devices early on as we are unable
 * to easily dynamically configure queue pairs after the drivers are up and running. So, here, we
 * configure the max capabilities of each device and assign threads to queue pairs as channels are
 * requested.
 */
static int
vbdev_crypto_init_crypto_drivers(void)
{
	uint8_t cdev_count;
	uint8_t cdev_id;
	int i, rc;
	struct vbdev_dev *device;
	struct vbdev_dev *tmp_dev;
	struct device_qp *dev_qp;
	unsigned int max_sess_size = 0, sess_size;
	uint16_t num_lcores = rte_lcore_count();
	char aesni_args[32];

	/* Only the first call, via RPC or module init should init the crypto drivers. */
	if (g_session_mp != NULL) {
		return 0;
	}

	/* We always init AESNI_MB */
	snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d", AESNI_MB_NUM_QP);
	rc = rte_vdev_init(AESNI_MB, aesni_args);
	if (rc) {
		SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
			       "Possibly %s is not supported by DPDK library. "
			       "Keep going...\n", AESNI_MB, rc, AESNI_MB);
	}

	/* If we have no crypto devices, there's no reason to continue. */
	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		return 0;
	}

	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
	if (g_mbuf_offset < 0) {
		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
		return -EINVAL;
	}

	/*
	 * Create global mempools, shared by all devices regardless of type.
	 */

	/* First determine max session size, most pools are shared by all the devices,
	 * so we need to find the global max sessions size.
	 */
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size) {
			max_sess_size = sess_size;
		}
	}

	g_session_mp_priv = rte_mempool_create("session_mp_priv", NUM_SESSIONS, max_sess_size,
					       SESS_MEMPOOL_CACHE_SIZE, 0, NULL, NULL, NULL,
					       NULL, SOCKET_ID_ANY, 0);
	if (g_session_mp_priv == NULL) {
		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
		return -ENOMEM;
	}

	g_session_mp = rte_cryptodev_sym_session_pool_create(
			       "session_mp",
			       NUM_SESSIONS, 0, SESS_MEMPOOL_CACHE_SIZE, 0,
			       SOCKET_ID_ANY);
	if (g_session_mp == NULL) {
		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
		rc = -ENOMEM;
		goto error_create_session_mp;
	}

	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS, POOL_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	if (g_mbuf_mp == NULL) {
		SPDK_ERRLOG("Cannot create mbuf pool\n");
		rc = -ENOMEM;
		goto error_create_mbuf;
	}

	/* We use per op private data to store the IV and our own struct
	 * for queueing ops.
511 */ 512 g_crypto_op_mp = rte_crypto_op_pool_create("op_mp", 513 RTE_CRYPTO_OP_TYPE_SYMMETRIC, 514 NUM_MBUFS, 515 POOL_CACHE_SIZE, 516 IV_LENGTH + QUEUED_OP_LENGTH, 517 rte_socket_id()); 518 519 if (g_crypto_op_mp == NULL) { 520 SPDK_ERRLOG("Cannot create op pool\n"); 521 rc = -ENOMEM; 522 goto error_create_op; 523 } 524 525 /* Init all devices */ 526 for (i = 0; i < cdev_count; i++) { 527 rc = create_vbdev_dev(i, num_lcores); 528 if (rc) { 529 goto err; 530 } 531 } 532 533 /* Assign index values to the QAT device qp nodes so that we can 534 * assign them for optimal performance. 535 */ 536 i = 0; 537 TAILQ_FOREACH(dev_qp, &g_device_qp_qat, link) { 538 dev_qp->index = i++; 539 } 540 541 g_shinfo.free_cb = shinfo_free_cb; 542 return 0; 543 544 /* Error cleanup paths. */ 545 err: 546 TAILQ_FOREACH_SAFE(device, &g_vbdev_devs, link, tmp_dev) { 547 TAILQ_REMOVE(&g_vbdev_devs, device, link); 548 release_vbdev_dev(device); 549 } 550 rte_mempool_free(g_crypto_op_mp); 551 g_crypto_op_mp = NULL; 552 error_create_op: 553 rte_mempool_free(g_mbuf_mp); 554 g_mbuf_mp = NULL; 555 error_create_mbuf: 556 rte_mempool_free(g_session_mp); 557 g_session_mp = NULL; 558 error_create_session_mp: 559 if (g_session_mp_priv != NULL) { 560 rte_mempool_free(g_session_mp_priv); 561 g_session_mp_priv = NULL; 562 } 563 return rc; 564 } 565 566 /* Following an encrypt or decrypt we need to then either write the encrypted data or finish 567 * the read on decrypted data. Do that here. 568 */ 569 static void 570 _crypto_operation_complete(struct spdk_bdev_io *bdev_io) 571 { 572 struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto, 573 crypto_bdev); 574 struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx; 575 struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch; 576 struct spdk_bdev_io *free_me = io_ctx->read_io; 577 int rc = 0; 578 579 /* Can also be called from the crypto_dev_poller() to fail the stuck re-enqueue ops IO. */ 580 if (io_ctx->on_pending_list) { 581 TAILQ_REMOVE(&crypto_ch->pending_cry_ios, bdev_io, module_link); 582 io_ctx->on_pending_list = false; 583 } 584 585 if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 586 587 /* Complete the original IO and then free the one that we created 588 * as a result of issuing an IO via submit_request. 589 */ 590 if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) { 591 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 592 } else { 593 SPDK_ERRLOG("Issue with decryption on bdev_io %p\n", bdev_io); 594 rc = -EINVAL; 595 } 596 spdk_bdev_free_io(free_me); 597 598 } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 599 600 if (io_ctx->bdev_io_status != SPDK_BDEV_IO_STATUS_FAILED) { 601 /* Write the encrypted data. 
			 */
			rc = spdk_bdev_writev_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
						     &io_ctx->aux_buf_iov, 1, io_ctx->aux_offset_blocks,
						     io_ctx->aux_num_blocks, _complete_internal_write,
						     bdev_io);
		} else {
			SPDK_ERRLOG("Issue with encryption on bdev_io %p\n", bdev_io);
			rc = -EINVAL;
		}

	} else {
		SPDK_ERRLOG("Unknown bdev type %u on crypto operation completion\n",
			    bdev_io->type);
		rc = -EINVAL;
	}

	if (rc) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
cancel_queued_crypto_ops(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
{
	struct rte_mbuf *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE];
	struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE];
	struct vbdev_crypto_op *op_to_cancel, *tmp_op;
	struct rte_crypto_op *crypto_op;
	int num_mbufs, num_dequeued_ops;

	/* Remove all ops from the failed IO. Since we don't know the
	 * order we have to check them all. */
	num_mbufs = 0;
	num_dequeued_ops = 0;
	TAILQ_FOREACH_SAFE(op_to_cancel, &crypto_ch->queued_cry_ops, link, tmp_op) {
		/* Checking if this is our op. One IO contains multiple ops. */
		if (bdev_io == op_to_cancel->bdev_io) {
			crypto_op = op_to_cancel->crypto_op;
			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_cancel, link);

			/* Populating lists for freeing mbufs and ops. */
			mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_src;
			if (crypto_op->sym->m_dst) {
				mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_dst;
			}
			dequeued_ops[num_dequeued_ops++] = crypto_op;
		}
	}

	/* Now bulk free both mbufs and crypto operations. */
	if (num_dequeued_ops > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops,
				     num_dequeued_ops);
		assert(num_mbufs > 0);
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
	}
}

static int _crypto_operation(struct spdk_bdev_io *bdev_io,
			     enum rte_crypto_cipher_operation crypto_op,
			     void *aux_buf);

/* This is the poller for the crypto device. It uses a single API to dequeue whatever is ready at
 * the device. Then we need to decide if what we've got so far (including previous poller
 * runs) totals up to one or more complete bdev_ios and if so continue with the bdev_io
 * accordingly. This means either completing a read or issuing a new write.
 */
static int
crypto_dev_poller(void *args)
{
	struct crypto_io_channel *crypto_ch = args;
	uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id;
	int i, num_dequeued_ops, num_enqueued_ops;
	struct spdk_bdev_io *bdev_io = NULL;
	struct crypto_bdev_io *io_ctx = NULL;
	struct rte_crypto_op *dequeued_ops[MAX_DEQUEUE_BURST_SIZE];
	struct rte_mbuf *mbufs_to_free[2 * MAX_DEQUEUE_BURST_SIZE];
	int num_mbufs = 0;
	struct vbdev_crypto_op *op_to_resubmit;

	/* Each run of the poller will get just what the device has available
	 * at the moment we call it, we don't check again after draining the
	 * first batch.
	 */
	num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, crypto_ch->device_qp->qp,
			   dequeued_ops, MAX_DEQUEUE_BURST_SIZE);

	/* Check if operation was processed successfully */
	for (i = 0; i < num_dequeued_ops; i++) {

		/* We don't know the order or association of the crypto ops wrt any
		 * particular bdev_io so we need to look at each and determine if it's
		 * the last one for its bdev_io or not.
		 */
		bdev_io = (struct spdk_bdev_io *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset,
				uint64_t *);
		assert(bdev_io != NULL);
		io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;

		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			SPDK_ERRLOG("error with op %d status %u\n", i,
				    dequeued_ops[i]->status);
			/* Update the bdev status to error, we'll still process the
			 * rest of the crypto ops for this bdev_io though so they
			 * aren't left hanging.
			 */
			io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
		}

		assert(io_ctx->cryop_cnt_remaining > 0);

		/* Return the associated src and dst mbufs by collecting them into
		 * an array that we can use the bulk API to free after the loop.
		 */
		*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
		if (dequeued_ops[i]->sym->m_dst) {
			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
		}

		/* done encrypting, complete the bdev_io */
		if (--io_ctx->cryop_cnt_remaining == 0) {

			/* If we're completing this with an outstanding reset we need
			 * to fail it.
			 */
			if (crypto_ch->iter) {
				io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
			}

			/* Complete the IO */
			_crypto_operation_complete(bdev_io);
		}
	}

	/* Now bulk free both mbufs and crypto operations. */
	if (num_dequeued_ops > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp,
				     (void **)dequeued_ops,
				     num_dequeued_ops);
		assert(num_mbufs > 0);
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
	}

	/* Check if there are any pending crypto ops to process */
	while (!TAILQ_EMPTY(&crypto_ch->queued_cry_ops)) {
		op_to_resubmit = TAILQ_FIRST(&crypto_ch->queued_cry_ops);
		bdev_io = op_to_resubmit->bdev_io;
		io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
		num_enqueued_ops = rte_cryptodev_enqueue_burst(op_to_resubmit->cdev_id,
				   op_to_resubmit->qp,
				   &op_to_resubmit->crypto_op,
				   1);
		if (num_enqueued_ops == 1) {
			/* Make sure we don't put this on twice as one bdev_io is made up
			 * of many crypto ops.
			 */
			if (io_ctx->on_pending_list == false) {
				TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, bdev_io, module_link);
				io_ctx->on_pending_list = true;
			}
			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link);
		} else {
			if (op_to_resubmit->crypto_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
				/* If we couldn't get one, just break and try again later. */
				break;
			} else {
				/* Something is really wrong with the op. Most probably the
				 * mbuf is broken or the HW is not able to process the request.
				 * Fail the IO and remove its ops from the queued ops list. */
				io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;

				cancel_queued_crypto_ops(crypto_ch, bdev_io);

				/* Fail the IO if there is nothing left on device.
				 */
				if (--io_ctx->cryop_cnt_remaining == 0) {
					_crypto_operation_complete(bdev_io);
				}
			}

		}
	}

	/* If the channel iter is not NULL, we need to continue to poll
	 * until the pending list is empty, then we can move on to the
	 * next channel.
	 */
	if (crypto_ch->iter && TAILQ_EMPTY(&crypto_ch->pending_cry_ios)) {
		SPDK_NOTICELOG("Channel %p has been quiesced.\n", crypto_ch);
		spdk_for_each_channel_continue(crypto_ch->iter, 0);
		crypto_ch->iter = NULL;
	}

	return num_dequeued_ops;
}

/* Allocate the new mbuf of @remainder size with data pointed by @addr and attach
 * it to the @orig_mbuf. */
static int
mbuf_chain_remainder(struct spdk_bdev_io *bdev_io, struct rte_mbuf *orig_mbuf,
		     uint8_t *addr, uint32_t remainder)
{
	uint64_t phys_addr, phys_len;
	struct rte_mbuf *chain_mbuf;
	int rc;

	phys_len = remainder;
	phys_addr = spdk_vtophys((void *)addr, &phys_len);
	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len != remainder)) {
		return -EFAULT;
	}
	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
	if (spdk_unlikely(rc)) {
		return -ENOMEM;
	}
	/* Store context in every mbuf as we don't know anything about completion order */
	*RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io;
	rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, phys_len, &g_shinfo);
	rte_pktmbuf_append(chain_mbuf, phys_len);

	/* Chained buffer is released by rte_pktbuf_free_bulk() automagically. */
	rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
	return 0;
}

/* Attach data buffer pointed by @addr to @mbuf. Return utilized len of the
 * contiguous space that was physically available. */
static uint64_t
mbuf_attach_buf(struct spdk_bdev_io *bdev_io, struct rte_mbuf *mbuf,
		uint8_t *addr, uint32_t len)
{
	uint64_t phys_addr, phys_len;

	/* Store context in every mbuf as we don't know anything about completion order */
	*RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)bdev_io;

	phys_len = len;
	phys_addr = spdk_vtophys((void *)addr, &phys_len);
	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
		return 0;
	}
	assert(phys_len <= len);

	/* Set the mbuf elements address and length. */
	rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
	rte_pktmbuf_append(mbuf, phys_len);

	return phys_len;
}
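/* A worked example of the LBA-as-IV scheme used in _crypto_operation() below (block size
 * assumed to be 512B): a write of 8 blocks at offset_blocks = 100 becomes 8 crypto ops, and
 * op N gets a 16-byte IV that is zeroed and then has the 64-bit value (100 + N) copied into
 * its first 8 bytes. Every LBA is therefore always processed with the same, unique IV.
 */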
/* We're either encrypting on the way down or decrypting on the way back. */
static int
_crypto_operation(struct spdk_bdev_io *bdev_io, enum rte_crypto_cipher_operation crypto_op,
		  void *aux_buf)
{
	uint16_t num_enqueued_ops = 0;
	uint32_t cryop_cnt = bdev_io->u.bdev.num_blocks;
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = io_ctx->crypto_ch;
	uint8_t cdev_id = crypto_ch->device_qp->device->cdev_id;
	uint32_t crypto_len = io_ctx->crypto_bdev->crypto_bdev.blocklen;
	uint64_t total_length = bdev_io->u.bdev.num_blocks * crypto_len;
	int rc;
	uint32_t iov_index = 0;
	uint32_t allocated = 0;
	uint8_t *current_iov = NULL;
	uint64_t total_remaining = 0;
	uint64_t current_iov_remaining = 0;
	uint32_t crypto_index = 0;
	uint32_t en_offset = 0;
	struct rte_crypto_op *crypto_ops[MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *src_mbufs[MAX_ENQUEUE_ARRAY_SIZE];
	struct rte_mbuf *dst_mbufs[MAX_ENQUEUE_ARRAY_SIZE];
	int burst;
	struct vbdev_crypto_op *op_to_queue;
	uint64_t alignment = spdk_bdev_get_buf_align(&io_ctx->crypto_bdev->crypto_bdev);

	assert((bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen) <= CRYPTO_MAX_IO);

	/* Get the number of source mbufs that we need. These will always be 1:1 because we
	 * don't support chaining. The reason we don't is because of our decision to use
	 * LBA as IV, there can be no case where we'd need >1 mbuf per crypto op or the
	 * op would be > 1 LBA.
	 */
	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, cryop_cnt);
	if (rc) {
		SPDK_ERRLOG("ERROR trying to get src_mbufs!\n");
		return -ENOMEM;
	}

	/* Get the same amount but these buffers to describe the encrypted data location (dst). */
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, cryop_cnt);
		if (rc) {
			SPDK_ERRLOG("ERROR trying to get dst_mbufs!\n");
			rc = -ENOMEM;
			goto error_get_dst;
		}
	}

#ifdef __clang_analyzer__
	/* silence scan-build false positive */
	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, MAX_ENQUEUE_ARRAY_SIZE, 0x1000);
#endif
	/* Allocate crypto operations. */
	allocated = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
					     RTE_CRYPTO_OP_TYPE_SYMMETRIC,
					     crypto_ops, cryop_cnt);
	if (allocated < cryop_cnt) {
		SPDK_ERRLOG("ERROR trying to get crypto ops!\n");
		rc = -ENOMEM;
		goto error_get_ops;
	}

	/* For encryption, we need to prepare a single contiguous buffer as the encryption
	 * destination, we'll then pass that along for the write after encryption is done.
	 * This is done to avoid encrypting the provided write buffer which may be
	 * undesirable in some use cases.
	 */
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		io_ctx->aux_buf_iov.iov_len = total_length;
		io_ctx->aux_buf_raw = aux_buf;
		io_ctx->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~(alignment - 1));
		io_ctx->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
		io_ctx->aux_num_blocks = bdev_io->u.bdev.num_blocks;
	}

	/* This value is used in the completion callback to determine when the bdev_io is
	 * complete.
	 */
	io_ctx->cryop_cnt_remaining = cryop_cnt;

	/* As we don't support chaining because of a decision to use LBA as IV, construction
	 * of crypto operations is straightforward.
	 * We build both the op, the mbuf and the
	 * dst_mbuf in our local arrays by looping through the length of the bdev IO and
	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
	 * mbuf per crypto operation.
	 */
	total_remaining = total_length;
	current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base;
	current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len;
	do {
		uint8_t *iv_ptr;
		uint8_t *buf_addr;
		uint64_t phys_len;
		uint32_t remainder;
		uint64_t op_block_offset;

		phys_len = mbuf_attach_buf(bdev_io, src_mbufs[crypto_index],
					   current_iov, crypto_len);
		if (spdk_unlikely(phys_len == 0)) {
			rc = -EFAULT;
			goto error_attach_session;
		}

		/* Handle the case of page boundary. */
		remainder = crypto_len - phys_len;
		if (spdk_unlikely(remainder > 0)) {
			rc = mbuf_chain_remainder(bdev_io, src_mbufs[crypto_index],
						  current_iov + phys_len, remainder);
			if (spdk_unlikely(rc)) {
				goto error_attach_session;
			}
		}

		/* Set the IV - we use the LBA of the crypto_op */
		iv_ptr = rte_crypto_op_ctod_offset(crypto_ops[crypto_index], uint8_t *,
						   IV_OFFSET);
		memset(iv_ptr, 0, IV_LENGTH);
		op_block_offset = bdev_io->u.bdev.offset_blocks + crypto_index;
		rte_memcpy(iv_ptr, &op_block_offset, sizeof(uint64_t));

		/* Set the data to encrypt/decrypt length */
		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;

		/* link the mbuf to the crypto op. */
		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];

		/* For encrypt, point the destination to a buffer we allocate and redirect the bdev_io
		 * that will be used to process the write on completion to the same buffer. Setting
		 * up the en_buffer is a little simpler as we know the destination buffer is a single IOV.
		 */
		if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			buf_addr = io_ctx->aux_buf_iov.iov_base + en_offset;
			phys_len = mbuf_attach_buf(bdev_io, dst_mbufs[crypto_index],
						   buf_addr, crypto_len);
			if (spdk_unlikely(phys_len == 0)) {
				rc = -EFAULT;
				goto error_attach_session;
			}

			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
			en_offset += phys_len;

			/* Handle the case of page boundary. */
			remainder = crypto_len - phys_len;
			if (spdk_unlikely(remainder > 0)) {
				rc = mbuf_chain_remainder(bdev_io, dst_mbufs[crypto_index],
							  buf_addr + phys_len, remainder);
				if (spdk_unlikely(rc)) {
					goto error_attach_session;
				}
				en_offset += remainder;
			}

			/* Attach the crypto session to the operation */
			rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
							      io_ctx->crypto_bdev->session_encrypt);
			if (rc) {
				rc = -EINVAL;
				goto error_attach_session;
			}
		} else {
			crypto_ops[crypto_index]->sym->m_dst = NULL;

			/* Attach the crypto session to the operation */
			rc = rte_crypto_op_attach_sym_session(crypto_ops[crypto_index],
							      io_ctx->crypto_bdev->session_decrypt);
			if (rc) {
				rc = -EINVAL;
				goto error_attach_session;
			}
		}

		/* Subtract our running totals for the op in progress and the overall bdev io */
		total_remaining -= crypto_len;
		current_iov_remaining -= crypto_len;

		/* move our current IOV pointer accordingly.
		 */
		current_iov += crypto_len;

		/* move on to the next crypto operation */
		crypto_index++;

		/* If we're done with this IOV, move to the next one. */
		if (current_iov_remaining == 0 && total_remaining > 0) {
			iov_index++;
			current_iov = bdev_io->u.bdev.iovs[iov_index].iov_base;
			current_iov_remaining = bdev_io->u.bdev.iovs[iov_index].iov_len;
		}
	} while (total_remaining > 0);

	/* Enqueue everything we've got but limit by the max number of descriptors we
	 * configured the crypto device for.
	 */
	burst = spdk_min(cryop_cnt, io_ctx->crypto_bdev->qp_desc_nr);
	num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, crypto_ch->device_qp->qp,
			   &crypto_ops[0],
			   burst);

	/* Add this bdev_io to our outstanding list if any of its crypto ops made it. */
	if (num_enqueued_ops > 0) {
		TAILQ_INSERT_TAIL(&crypto_ch->pending_cry_ios, bdev_io, module_link);
		io_ctx->on_pending_list = true;
	}
	/* We were unable to enqueue everything but did get some, so we need to decide what
	 * to do based on the status of the last op.
	 */
	if (num_enqueued_ops < cryop_cnt) {
		switch (crypto_ops[num_enqueued_ops]->status) {
		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
			/* Queue them up on a linked list to be resubmitted via the poller. */
			for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) {
				op_to_queue = (struct vbdev_crypto_op *)rte_crypto_op_ctod_offset(crypto_ops[crypto_index],
						uint8_t *, QUEUED_OP_OFFSET);
				op_to_queue->cdev_id = cdev_id;
				op_to_queue->qp = crypto_ch->device_qp->qp;
				op_to_queue->crypto_op = crypto_ops[crypto_index];
				op_to_queue->bdev_io = bdev_io;
				TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops,
						  op_to_queue,
						  link);
			}
			break;
		default:
			/* For all other statuses, set the io_ctx bdev_io status so that
			 * the poller will pick the failure up for the overall bdev status.
			 */
			io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_FAILED;
			if (num_enqueued_ops == 0) {
				/* If nothing was enqueued, but the last one wasn't because of
				 * busy, fail it now as the poller won't know anything about it.
				 */
				rc = -EINVAL;
				goto error_attach_session;
			}
			break;
		}
	}

	return rc;

	/* Error cleanup paths. */
error_attach_session:
error_get_ops:
	if (crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* This also releases chained mbufs if any. */
		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
	}
	if (allocated > 0) {
		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops,
				     allocated);
	}
error_get_dst:
	/* This also releases chained mbufs if any. */
	rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
	return rc;
}

/* This function is called after all channels have been quiesced following
 * a bdev reset.
 */
static void
_ch_quiesce_done(struct spdk_io_channel_iter *i, int status)
{
	struct crypto_bdev_io *io_ctx = spdk_io_channel_iter_get_ctx(i);

	assert(TAILQ_EMPTY(&io_ctx->crypto_ch->pending_cry_ios));
	assert(io_ctx->orig_io != NULL);

	spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_SUCCESS);
}
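/* Reset handling overview (summarizing the code in this file): a RESET bdev_io is passed to the
 * base bdev via spdk_bdev_reset(). When that completes, _complete_internal_io() starts
 * spdk_for_each_channel() with _ch_quiesce(); each channel stores the iterator, the poller then
 * fails and drains its pending_cry_ios before calling spdk_for_each_channel_continue(), and once
 * every channel has quiesced, _ch_quiesce_done() completes the original reset IO.
 */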
/* This function is called per channel to quiesce IOs before completing a
 * bdev reset that we received.
 */
static void
_ch_quiesce(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);

	crypto_ch->iter = i;
	/* When the poller runs, it will see the non-NULL iter and handle
	 * the quiesce.
	 */
}

/* Completion callback for IOs that were issued from this bdev other than read/write.
 * They have their own for readability.
 */
static void
_complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

		assert(orig_io == orig_ctx->orig_io);

		spdk_bdev_free_io(bdev_io);

		spdk_for_each_channel(orig_ctx->crypto_bdev,
				      _ch_quiesce,
				      orig_ctx,
				      _ch_quiesce_done);
		return;
	}

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for writes that were issued from this bdev. */
static void
_complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

	spdk_bdev_io_put_aux_buf(orig_io, orig_ctx->aux_buf_raw);

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for reads that were issued from this bdev. */
static void
_complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

	if (success) {

		/* Save off this bdev_io so it can be freed after decryption.
		 */
		orig_ctx->read_io = bdev_io;

		if (!_crypto_operation(orig_io, RTE_CRYPTO_CIPHER_OP_DECRYPT, NULL)) {
			return;
		} else {
			SPDK_ERRLOG("ERROR decrypting\n");
		}
	} else {
		SPDK_ERRLOG("ERROR on read prior to decrypting\n");
	}

	spdk_bdev_io_complete(orig_io, SPDK_BDEV_IO_STATUS_FAILED);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_crypto_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;

	vbdev_crypto_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, io_ctx->crypto_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it.
 */
static void
crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	rc = spdk_bdev_readv_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, bdev_io->u.bdev.iovs,
				    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
				    bdev_io->u.bdev.num_blocks, _complete_internal_read,
				    bdev_io);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* For encryption we don't want to encrypt the data in place as the host isn't
 * expecting us to mangle its data buffers so we need to encrypt into the bdev
 * aux buffer, then we can use that as the source for the disk data transfer.
 */
static void
crypto_write_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			void *aux_buf)
{
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	rc = _crypto_operation(bdev_io, RTE_CRYPTO_CIPHER_OP_ENCRYPT, aux_buf);
	if (rc != 0) {
		spdk_bdev_io_put_aux_buf(bdev_io, aux_buf);
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone submits IO to this crypto vbdev. For IOs not relevant to crypto,
 * we're simply passing it on here via SPDK IO calls which in turn allocate another bdev IO
 * and call our cpl callback provided below along with the original bdev_io so that we can
 * complete it once this IO completes. For crypto operations, we'll either encrypt it first
 * (writes) then call back into bdev to submit it or we'll submit a read and then catch it
 * on the way back for decryption.
 */
static void
vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *io_ctx = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	memset(io_ctx, 0, sizeof(struct crypto_bdev_io));
	io_ctx->crypto_bdev = crypto_bdev;
	io_ctx->crypto_ch = crypto_ch;
	io_ctx->orig_io = bdev_io;
	io_ctx->bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		/* Tell the bdev layer that we need an aux buf in addition to the data
		 * buf already associated with the bdev.
		 */
		spdk_bdev_io_get_aux_buf(bdev_io, crypto_write_get_buf_cb);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch,
				     _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			io_ctx->ch = ch;
			vbdev_crypto_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer except for WZ command which
 * we always say we don't support so that the bdev layer will actually send us
 * real writes that we can encrypt.
 */
static bool
vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_FLUSH:
		return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type);
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* Force the bdev layer to issue actual writes of zeroes so we can
		 * encrypt them as regular writes.
		 */
	default:
		return false;
	}
}

/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_crypto *crypto_bdev = io_device;

	/* Done with this crypto_bdev. */
	rte_cryptodev_sym_session_free(crypto_bdev->session_decrypt);
	rte_cryptodev_sym_session_free(crypto_bdev->session_encrypt);
	free(crypto_bdev->drv_name);
	if (crypto_bdev->key) {
		memset(crypto_bdev->key, 0, strnlen(crypto_bdev->key, (AES_CBC_KEY_LENGTH + 1)));
		free(crypto_bdev->key);
	}
	if (crypto_bdev->key2) {
		memset(crypto_bdev->key2, 0, strnlen(crypto_bdev->key2, (AES_XTS_KEY_LENGTH + 1)));
		free(crypto_bdev->key2);
	}
	if (crypto_bdev->xts_key) {
		memset(crypto_bdev->xts_key, 0, strnlen(crypto_bdev->xts_key, (AES_XTS_KEY_LENGTH * 2) + 1));
		free(crypto_bdev->xts_key);
	}
	free(crypto_bdev->crypto_bdev.name);
	free(crypto_bdev);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_crypto_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_crypto_destruct(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* Remove this device from the internal list */
	TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(crypto_bdev->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) {
		spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc);
	} else {
		spdk_bdev_close(crypto_bdev->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(crypto_bdev, _device_unregister_cb);

	g_number_of_claimed_volumes--;

	return 0;
}

/* We supplied this as an entry point for upper layers who want to communicate to this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our crypto vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our crypto node.
 */
static struct spdk_io_channel *
vbdev_crypto_get_io_channel(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our crypto_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	return spdk_get_io_channel(crypto_bdev);
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	spdk_json_write_name(w, "crypto");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
	spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->drv_name);
	spdk_json_write_named_string(w, "key", crypto_bdev->key);
	if (strcmp(crypto_bdev->cipher, AES_XTS) == 0) {
		spdk_json_write_named_string(w, "key2", crypto_bdev->key2);
	}
	spdk_json_write_named_string(w, "cipher", crypto_bdev->cipher);
	spdk_json_write_object_end(w);
	return 0;
}

static int
vbdev_crypto_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev;

	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_crypto_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
		spdk_json_write_named_string(w, "crypto_pmd", crypto_bdev->drv_name);
		spdk_json_write_named_string(w, "key", crypto_bdev->key);
		if (strcmp(crypto_bdev->cipher, AES_XTS) == 0) {
			spdk_json_write_named_string(w, "key2", crypto_bdev->key2);
		}
		spdk_json_write_named_string(w, "cipher", crypto_bdev->cipher);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}

/* Helper function for the channel creation callback. */
static void
_assign_device_qp(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
		  struct crypto_io_channel *crypto_ch)
{
	pthread_mutex_lock(&g_device_qp_lock);
	if (strcmp(crypto_bdev->drv_name, QAT) == 0) {
		/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
		 * workload out over the multiple virtual functions in the device. For the devices
		 * where this isn't the case, it doesn't hurt.
		 */
		TAILQ_FOREACH(device_qp, &g_device_qp_qat, link) {
			if (device_qp->index != g_next_qat_index) {
				continue;
			}
			if (device_qp->in_use == false) {
				crypto_ch->device_qp = device_qp;
				device_qp->in_use = true;
				g_next_qat_index = (g_next_qat_index + QAT_VF_SPREAD) % g_qat_total_qp;
				break;
			} else {
				/* if the preferred index is used, skip to the next one in this set. */
				g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
			}
		}
	} else if (strcmp(crypto_bdev->drv_name, AESNI_MB) == 0) {
		TAILQ_FOREACH(device_qp, &g_device_qp_aesni_mb, link) {
			if (device_qp->in_use == false) {
				crypto_ch->device_qp = device_qp;
				device_qp->in_use = true;
				break;
			}
		}
	}
	pthread_mutex_unlock(&g_device_qp_lock);
}

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. We also register the
 * poller used to complete crypto operations from the device.
 */
static int
crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;
	struct vbdev_crypto *crypto_bdev = io_device;
	struct device_qp *device_qp = NULL;

	crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
	crypto_ch->poller = SPDK_POLLER_REGISTER(crypto_dev_poller, crypto_ch, 0);
	crypto_ch->device_qp = NULL;

	/* Assign a device/qp combination that is unique per channel per PMD. */
	_assign_device_qp(crypto_bdev, device_qp, crypto_ch);
	assert(crypto_ch->device_qp);

	/* We use this queue to track outstanding IO in our layer. */
	TAILQ_INIT(&crypto_ch->pending_cry_ios);

	/* We use this to queue up crypto ops when the device is busy. */
	TAILQ_INIT(&crypto_ch->queued_cry_ops);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created.
 */
static void
crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;

	pthread_mutex_lock(&g_device_qp_lock);
	crypto_ch->device_qp->in_use = false;
	pthread_mutex_unlock(&g_device_qp_lock);

	spdk_poller_unregister(&crypto_ch->poller);
	spdk_put_io_channel(crypto_ch->base_ch);
}

/* Create the association from the bdev and vbdev name and insert
 * on the global list.
/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. We also register the
 * poller used to complete crypto operations from the device.
 */
static int
crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;
	struct vbdev_crypto *crypto_bdev = io_device;
	struct device_qp *device_qp = NULL;

	crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
	crypto_ch->poller = SPDK_POLLER_REGISTER(crypto_dev_poller, crypto_ch, 0);
	crypto_ch->device_qp = NULL;

	/* Assign a device/qp combination that is unique per channel per PMD. */
	_assign_device_qp(crypto_bdev, device_qp, crypto_ch);
	assert(crypto_ch->device_qp);

	/* We use this queue to track outstanding IO in our layer. */
	TAILQ_INIT(&crypto_ch->pending_cry_ios);

	/* We use this to queue up crypto ops when the device is busy. */
	TAILQ_INIT(&crypto_ch->queued_cry_ops);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created.
 */
static void
crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;

	pthread_mutex_lock(&g_device_qp_lock);
	crypto_ch->device_qp->in_use = false;
	pthread_mutex_unlock(&g_device_qp_lock);

	spdk_poller_unregister(&crypto_ch->poller);
	spdk_put_io_channel(crypto_ch->base_ch);
}

/* Create the association from the bdev and vbdev name and insert
 * on the global list.
 */
static int
vbdev_crypto_insert_name(const char *bdev_name, const char *vbdev_name,
			 const char *crypto_pmd, const char *key,
			 const char *cipher, const char *key2)
{
	struct bdev_names *name;
	int rc, j;
	bool found = false;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("crypto bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		rc = -ENOMEM;
		goto error_alloc_bname;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		rc = -ENOMEM;
		goto error_alloc_vname;
	}

	name->drv_name = strdup(crypto_pmd);
	if (!name->drv_name) {
		SPDK_ERRLOG("could not allocate name->drv_name\n");
		rc = -ENOMEM;
		goto error_alloc_dname;
	}
	for (j = 0; j < MAX_NUM_DRV_TYPES ; j++) {
		if (strcmp(crypto_pmd, g_driver_names[j]) == 0) {
			found = true;
			break;
		}
	}
	if (!found) {
		SPDK_ERRLOG("invalid crypto PMD type %s\n", crypto_pmd);
		rc = -EINVAL;
		goto error_invalid_pmd;
	}

	name->key = strdup(key);
	if (!name->key) {
		SPDK_ERRLOG("could not allocate name->key\n");
		rc = -ENOMEM;
		goto error_alloc_key;
	}
	if (strnlen(name->key, (AES_CBC_KEY_LENGTH + 1)) != AES_CBC_KEY_LENGTH) {
		SPDK_ERRLOG("invalid AES_CBC key length\n");
		rc = -EINVAL;
		goto error_invalid_key;
	}

	if (strncmp(cipher, AES_XTS, sizeof(AES_XTS)) == 0) {
		/* To please scan-build, input validation makes sure we can't
		 * have this cipher without providing a key2.
		 */
		name->cipher = AES_XTS;
		assert(key2);
		if (strnlen(key2, (AES_XTS_KEY_LENGTH + 1)) != AES_XTS_KEY_LENGTH) {
			SPDK_ERRLOG("invalid AES_XTS key length\n");
			rc = -EINVAL;
			goto error_invalid_key2;
		}

		name->key2 = strdup(key2);
		if (!name->key2) {
			SPDK_ERRLOG("could not allocate name->key2\n");
			rc = -ENOMEM;
			goto error_alloc_key2;
		}
	} else if (strncmp(cipher, AES_CBC, sizeof(AES_CBC)) == 0) {
		name->cipher = AES_CBC;
	} else {
		SPDK_ERRLOG("Invalid cipher: %s\n", cipher);
		rc = -EINVAL;
		goto error_cipher;
	}

	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;

	/* Error cleanup paths. */
error_cipher:
	if (name->key2) {
		memset(name->key2, 0, strlen(name->key2));
		free(name->key2);
	}
error_alloc_key2:
error_invalid_key2:
error_invalid_key:
	if (name->key) {
		memset(name->key, 0, strlen(name->key));
		free(name->key);
	}
error_alloc_key:
error_invalid_pmd:
	free(name->drv_name);
error_alloc_dname:
	free(name->vbdev_name);
error_alloc_vname:
	free(name->bdev_name);
error_alloc_bname:
	free(name);
	return rc;
}

/* RPC entry point for crypto creation. */
int
create_crypto_disk(const char *bdev_name, const char *vbdev_name,
		   const char *crypto_pmd, const char *key,
		   const char *cipher, const char *key2)
{
	int rc;

	rc = vbdev_crypto_insert_name(bdev_name, vbdev_name, crypto_pmd, key, cipher, key2);
	if (rc) {
		return rc;
	}

	rc = vbdev_crypto_claim(bdev_name);
	if (rc == -ENODEV) {
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}
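/* Illustrative only: a minimal sketch of how a caller (e.g. an RPC handler)
 * might drive create_crypto_disk() above. The bdev names and key here are
 * hypothetical; the key must be exactly AES_CBC_KEY_LENGTH characters and
 * key2 is only required for AES_XTS:
 *
 *	int rc = create_crypto_disk("Nvme0n1", "CryNvme0", AESNI_MB,
 *				    "0123456789123456", AES_CBC, NULL);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("failed to create crypto vbdev: %d\n", rc);
 *	}
 *
 * A return of 0 covers both immediate creation and the deferred case where the
 * base bdev has not shown up yet and examine() finishes the job later.
 */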
/* Called at driver init time; fully initializes the crypto drivers. */
static int
vbdev_crypto_init(void)
{
	int rc = 0;

	/* Fully configure both SW and HW drivers. */
	rc = vbdev_crypto_init_crypto_drivers();
	if (rc) {
		SPDK_ERRLOG("Error setting up crypto devices\n");
	}

	return rc;
}

/* Called when the entire module is being torn down. */
static void
vbdev_crypto_finish(void)
{
	struct bdev_names *name;
	struct vbdev_dev *device;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->drv_name);
		memset(name->key, 0, strlen(name->key));
		free(name->key);
		free(name->bdev_name);
		free(name->vbdev_name);
		if (name->key2) {
			memset(name->key2, 0, strlen(name->key2));
			free(name->key2);
		}
		free(name);
	}

	while ((device = TAILQ_FIRST(&g_vbdev_devs))) {
		TAILQ_REMOVE(&g_vbdev_devs, device, link);
		release_vbdev_dev(device);
	}
	rte_vdev_uninit(AESNI_MB);

	/* These are removed in release_vbdev_dev() */
	assert(TAILQ_EMPTY(&g_device_qp_qat));
	assert(TAILQ_EMPTY(&g_device_qp_aesni_mb));

	rte_mempool_free(g_crypto_op_mp);
	rte_mempool_free(g_mbuf_mp);
	rte_mempool_free(g_session_mp);
	if (g_session_mp_priv != NULL) {
		rte_mempool_free(g_session_mp_priv);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_crypto_get_ctx_size(void)
{
	return sizeof(struct crypto_bdev_io);
}

static void
vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_crypto *crypto_bdev, *tmp;

	TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) {
		if (bdev_find == crypto_bdev->base_bdev) {
			spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event such as bdev removal. */
static void
vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_crypto_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

static void
vbdev_crypto_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}
/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = {
	.destruct = vbdev_crypto_destruct,
	.submit_request = vbdev_crypto_submit_request,
	.io_type_supported = vbdev_crypto_io_type_supported,
	.get_io_channel = vbdev_crypto_get_io_channel,
	.dump_info_json = vbdev_crypto_dump_info_json,
	.write_config_json = vbdev_crypto_write_config_json
};

static struct spdk_bdev_module crypto_if = {
	.name = "crypto",
	.module_init = vbdev_crypto_init,
	.get_ctx_size = vbdev_crypto_get_ctx_size,
	.examine_config = vbdev_crypto_examine,
	.module_fini = vbdev_crypto_finish,
	.config_json = vbdev_crypto_config_json
};

SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if)

static int
vbdev_crypto_claim(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_crypto *vbdev;
	struct vbdev_dev *device;
	struct spdk_bdev *bdev;
	bool found = false;
	int rc = 0;

	if (g_number_of_claimed_volumes >= MAX_CRYPTO_VOLUMES) {
		SPDK_DEBUGLOG(vbdev_crypto, "Reached max number of claimed volumes\n");
		return -EINVAL;
	}
	g_number_of_claimed_volumes++;

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the crypto_bdev & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name);

		vbdev = calloc(1, sizeof(struct vbdev_crypto));
		if (!vbdev) {
			SPDK_ERRLOG("could not allocate crypto_bdev\n");
			rc = -ENOMEM;
			goto error_vbdev_alloc;
		}

		vbdev->crypto_bdev.name = strdup(name->vbdev_name);
		if (!vbdev->crypto_bdev.name) {
			SPDK_ERRLOG("could not allocate crypto_bdev name\n");
			rc = -ENOMEM;
			goto error_bdev_name;
		}

		vbdev->key = strdup(name->key);
		if (!vbdev->key) {
			SPDK_ERRLOG("could not allocate crypto_bdev key\n");
			rc = -ENOMEM;
			goto error_alloc_key;
		}

		if (name->key2) {
			vbdev->key2 = strdup(name->key2);
			if (!vbdev->key2) {
				SPDK_ERRLOG("could not allocate crypto_bdev key2\n");
				rc = -ENOMEM;
				goto error_alloc_key2;
			}
		}

		vbdev->drv_name = strdup(name->drv_name);
		if (!vbdev->drv_name) {
			SPDK_ERRLOG("could not allocate crypto_bdev drv_name\n");
			rc = -ENOMEM;
			goto error_drv_name;
		}

		vbdev->crypto_bdev.product_name = "crypto";

		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb,
					NULL, &vbdev->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			goto error_open;
		}

		bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc);
		vbdev->base_bdev = bdev;

		vbdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;

		vbdev->crypto_bdev.write_cache = bdev->write_cache;
		vbdev->cipher = AES_CBC;
		if (strcmp(vbdev->drv_name, QAT) == 0) {
			vbdev->crypto_bdev.required_alignment =
				spdk_max(spdk_u32log2(bdev->blocklen), bdev->required_alignment);
			SPDK_NOTICELOG("QAT in use: Required alignment set to %u\n",
				       vbdev->crypto_bdev.required_alignment);
			if (strcmp(name->cipher, AES_CBC) == 0) {
				SPDK_NOTICELOG("QAT using cipher: AES_CBC\n");
			} else {
				SPDK_NOTICELOG("QAT using cipher: AES_XTS\n");
				vbdev->cipher = AES_XTS;
				/* DPDK expects the keys to be concatenated together. */
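				/* Illustrative only: with hypothetical 16-byte keys k1 (key) and
				 * k2 (key2), the buffer built just below ends up laid out as
				 *
				 *	xts_key[0..15]  = k1
				 *	xts_key[16..31] = k2
				 *	xts_key[32]     = '\0'
				 *
				 * i.e. AES_XTS_KEY_LENGTH * 2 key bytes plus a terminator, which is
				 * the single buffer handed to DPDK via cipher_xform.cipher.key.data.
				 */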
				vbdev->xts_key = calloc(1, (AES_XTS_KEY_LENGTH * 2) + 1);
				if (vbdev->xts_key == NULL) {
					SPDK_ERRLOG("could not allocate memory for XTS key\n");
					rc = -ENOMEM;
					goto error_xts_key;
				}
				memcpy(vbdev->xts_key, vbdev->key, AES_XTS_KEY_LENGTH);
				assert(name->key2);
				memcpy(vbdev->xts_key + AES_XTS_KEY_LENGTH, name->key2, AES_XTS_KEY_LENGTH + 1);
			}
		} else {
			vbdev->crypto_bdev.required_alignment = bdev->required_alignment;
		}
		/* Note: CRYPTO_MAX_IO is in units of bytes, optimal_io_boundary is
		 * in units of blocks.
		 */
		if (bdev->optimal_io_boundary > 0) {
			vbdev->crypto_bdev.optimal_io_boundary =
				spdk_min((CRYPTO_MAX_IO / bdev->blocklen), bdev->optimal_io_boundary);
		} else {
			vbdev->crypto_bdev.optimal_io_boundary = (CRYPTO_MAX_IO / bdev->blocklen);
		}
		vbdev->crypto_bdev.split_on_optimal_io_boundary = true;
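		/* Illustrative only: as a worked example, a hypothetical base bdev with
		 * 4KiB blocks and no optimal_io_boundary of its own would get a boundary
		 * of CRYPTO_MAX_IO / 4096 = 16 blocks here, so the generic bdev layer
		 * splits IO on that boundary before it ever reaches our submit path.
		 */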
		vbdev->crypto_bdev.blocklen = bdev->blocklen;
		vbdev->crypto_bdev.blockcnt = bdev->blockcnt;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our crypto_bdev node here.
		 */
		vbdev->crypto_bdev.ctxt = vbdev;
		vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table;
		vbdev->crypto_bdev.module = &crypto_if;
		TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link);

		spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb,
					sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name);

		/* Save the thread where the base device is opened */
		vbdev->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(bdev));
			goto error_claim;
		}

		/* To init the session we have to get the cryptoDev device ID for this vbdev */
		TAILQ_FOREACH(device, &g_vbdev_devs, link) {
			if (strcmp(device->cdev_info.driver_name, vbdev->drv_name) == 0) {
				found = true;
				break;
			}
		}
		if (found == false) {
			SPDK_ERRLOG("ERROR can't match crypto device driver to crypto vbdev!\n");
			rc = -EINVAL;
			goto error_cant_find_devid;
		}

		/* Get sessions. */
		vbdev->session_encrypt = rte_cryptodev_sym_session_create(g_session_mp);
		if (NULL == vbdev->session_encrypt) {
			SPDK_ERRLOG("ERROR trying to create crypto session!\n");
			rc = -EINVAL;
			goto error_session_en_create;
		}

		vbdev->session_decrypt = rte_cryptodev_sym_session_create(g_session_mp);
		if (NULL == vbdev->session_decrypt) {
			SPDK_ERRLOG("ERROR trying to create crypto session!\n");
			rc = -EINVAL;
			goto error_session_de_create;
		}

		/* Init our per vbdev xform with the desired cipher options. */
		vbdev->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		vbdev->cipher_xform.cipher.iv.offset = IV_OFFSET;
		if (strcmp(name->cipher, AES_CBC) == 0) {
			vbdev->cipher_xform.cipher.key.data = vbdev->key;
			vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
			vbdev->cipher_xform.cipher.key.length = AES_CBC_KEY_LENGTH;
		} else {
			vbdev->cipher_xform.cipher.key.data = vbdev->xts_key;
			vbdev->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
			vbdev->cipher_xform.cipher.key.length = AES_XTS_KEY_LENGTH * 2;
		}
		vbdev->cipher_xform.cipher.iv.length = IV_LENGTH;

		vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		rc = rte_cryptodev_sym_session_init(device->cdev_id, vbdev->session_encrypt,
						    &vbdev->cipher_xform,
						    g_session_mp_priv ? g_session_mp_priv : g_session_mp);
		if (rc < 0) {
			SPDK_ERRLOG("ERROR trying to init encrypt session!\n");
			rc = -EINVAL;
			goto error_session_init;
		}

		vbdev->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		rc = rte_cryptodev_sym_session_init(device->cdev_id, vbdev->session_decrypt,
						    &vbdev->cipher_xform,
						    g_session_mp_priv ? g_session_mp_priv : g_session_mp);
		if (rc < 0) {
			SPDK_ERRLOG("ERROR trying to init decrypt session!\n");
			rc = -EINVAL;
			goto error_session_init;
		}

		rc = spdk_bdev_register(&vbdev->crypto_bdev);
		if (rc < 0) {
			SPDK_ERRLOG("ERROR trying to register bdev\n");
			rc = -EINVAL;
			goto error_bdev_register;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "registered io_device and virtual bdev for: %s\n",
			      name->vbdev_name);
		break;
	}

	return rc;

	/* Error cleanup paths. */
error_bdev_register:
error_session_init:
	rte_cryptodev_sym_session_free(vbdev->session_decrypt);
error_session_de_create:
	rte_cryptodev_sym_session_free(vbdev->session_encrypt);
error_session_en_create:
error_cant_find_devid:
	spdk_bdev_module_release_bdev(vbdev->base_bdev);
error_claim:
	TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link);
	spdk_io_device_unregister(vbdev, NULL);
	if (vbdev->xts_key) {
		memset(vbdev->xts_key, 0, AES_XTS_KEY_LENGTH * 2);
		free(vbdev->xts_key);
	}
error_xts_key:
	spdk_bdev_close(vbdev->base_desc);
error_open:
	free(vbdev->drv_name);
error_drv_name:
	if (vbdev->key2) {
		memset(vbdev->key2, 0, strlen(vbdev->key2));
		free(vbdev->key2);
	}
error_alloc_key2:
	if (vbdev->key) {
		memset(vbdev->key, 0, strlen(vbdev->key));
		free(vbdev->key);
	}
error_alloc_key:
	free(vbdev->crypto_bdev.name);
error_bdev_name:
	free(vbdev);
error_vbdev_alloc:
	g_number_of_claimed_volumes--;
	return rc;
}

/* RPC entry for deleting a crypto vbdev. */
void
delete_crypto_disk(struct spdk_bdev *bdev, spdk_delete_crypto_complete cb_fn,
		   void *cb_arg)
{
	struct bdev_names *name;

	if (!bdev || bdev->module != &crypto_if) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
	 * vbdev does not get re-created if the same bdev is constructed at some other time,
	 * unless the underlying bdev was hot-removed.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->vbdev_name, bdev->name) == 0) {
			TAILQ_REMOVE(&g_bdev_names, name, link);
			free(name->bdev_name);
			free(name->vbdev_name);
			free(name->drv_name);
			memset(name->key, 0, strlen(name->key));
			free(name->key);
			if (name->key2) {
				memset(name->key2, 0, strlen(name->key2));
				free(name->key2);
			}
			free(name);
			break;
		}
	}

	/* Additional cleanup happens in the destruct callback. */
	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
}
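/* Illustrative only: a hedged sketch of how a caller might use
 * delete_crypto_disk() above. The callback below is hypothetical; its
 * (cb_arg, rc) signature follows the way cb_fn is invoked in the error path:
 *
 *	static void
 *	_delete_done(void *cb_arg, int rc)
 *	{
 *		if (rc != 0) {
 *			SPDK_ERRLOG("failed to delete crypto vbdev: %d\n", rc);
 *		}
 *	}
 *
 *	delete_crypto_disk(bdev, _delete_done, NULL);
 *
 * where bdev is the struct spdk_bdev * of the crypto vbdev to remove.
 */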
/* Because we registered this function as our module's examine_config callback,
 * we'll get this call anytime a new bdev shows up. Here we need to decide if
 * we care about it and if so what to do. We check the new bdev against the
 * list of names registered earlier and, if the user configured us to attach to
 * this bdev, here's where we do it.
 */
static void
vbdev_crypto_examine(struct spdk_bdev *bdev)
{
	vbdev_crypto_claim(spdk_bdev_get_name(bdev));
	spdk_bdev_module_examine_done(&crypto_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto)