/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "vbdev_crypto.h"

#include "spdk_internal/assert.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"
#include "spdk/likely.h"

struct bdev_names {
	struct vbdev_crypto_opts *opts;
	TAILQ_ENTRY(bdev_names) link;
};

/* List of crypto_bdev names and their base bdevs via configuration file. */
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

struct vbdev_crypto {
	struct spdk_bdev *base_bdev;		/* the thing we're attaching to */
	struct spdk_bdev_desc *base_desc;	/* its descriptor we get from open */
	struct spdk_bdev crypto_bdev;		/* the crypto virtual bdev */
	struct vbdev_crypto_opts *opts;		/* crypto options such as names and DEK */
	TAILQ_ENTRY(vbdev_crypto) link;
	struct spdk_thread *thread;		/* thread where base device is opened */
};

/* List of virtual bdevs and associated info for each. We keep the device friendly name here even
 * though it's also in the device struct, because we use it early on.
 */
static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto);

/* The crypto vbdev channel struct. It is allocated and freed on our behalf by the io channel code.
 * We store things in here that are needed on a per-thread basis, like the base_channel for this
 * thread.
 */
struct crypto_io_channel {
	struct spdk_io_channel *base_ch;		/* IO channel of base device */
	struct spdk_io_channel *accel_channel;		/* Accel engine channel used for crypto ops */
	struct spdk_accel_crypto_key *crypto_key;
	TAILQ_HEAD(, spdk_bdev_io) in_accel_fw;		/* requests submitted to the accel framework */
	struct spdk_io_channel_iter *reset_iter;	/* used with for_each_channel in reset */
};

enum crypto_io_resubmit_state {
	CRYPTO_IO_NEW,		/* Resubmit IO from scratch */
	CRYPTO_IO_READ_DONE,	/* Need to decrypt */
	CRYPTO_IO_ENCRYPT_DONE,	/* Need to write */
};
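/* Informational overview of the data path implemented below, derived from the submit
 * and completion callbacks in this file:
 *
 *   Write: vbdev_crypto_submit_request -> spdk_bdev_io_get_aux_buf ->
 *          spdk_accel_submit_encrypt (into the aux buffer) ->
 *          _crypto_operation_complete -> spdk_bdev_writev_blocks ->
 *          _complete_internal_write -> complete the original IO.
 *   Read:  vbdev_crypto_submit_request -> spdk_bdev_io_get_buf ->
 *          spdk_bdev_readv_blocks -> _complete_internal_read ->
 *          spdk_accel_submit_decrypt (in place) ->
 *          _crypto_operation_complete -> complete the original IO.
 *
 * Any -ENOMEM along the way parks the IO with spdk_bdev_queue_io_wait() and resumes
 * it from the matching crypto_io_resubmit_state above.
 */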
/* This is the crypto per-IO context that the bdev layer allocates for us opaquely and attaches to
 * each IO for us.
 */
struct crypto_bdev_io {
	struct crypto_io_channel *crypto_ch;	/* need to store for crypto completion handling */
	struct vbdev_crypto *crypto_bdev;	/* the crypto node struct associated with this IO */
	struct spdk_bdev_io *read_io;		/* the read IO we issued */
	/* Used for the single contiguous buffer that serves as the crypto destination target for writes */
	uint64_t aux_num_blocks;		/* num of blocks for the contiguous buffer */
	uint64_t aux_offset_blocks;		/* block offset on media */
	void *aux_buf_raw;			/* raw buffer that the bdev layer gave us for the write buffer */
	struct iovec aux_buf_iov;		/* iov representing the aligned contiguous write buffer */

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
	enum crypto_io_resubmit_state resubmit_state;
};

static void vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io,
				  enum crypto_io_resubmit_state state);
static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void _complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void vbdev_crypto_examine(struct spdk_bdev *bdev);
static int vbdev_crypto_claim(const char *bdev_name);
static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);

/* Following an encrypt or decrypt we need to either write the encrypted data or finish
 * the read on the decrypted data. Do that here.
 */
static void
_crypto_operation_complete(void *ref, int status)
{
	struct spdk_bdev_io *bdev_io = ref;
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
	struct spdk_bdev_io *free_me = crypto_io->read_io;
	int rc = 0;

	if (status || crypto_ch->reset_iter) {
		/* If we're completing this with an outstanding reset we need to fail it */
		rc = -EINVAL;
	}

	TAILQ_REMOVE(&crypto_ch->in_accel_fw, bdev_io, module_link);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		/* Complete the original IO and then free the one that we created
		 * as a result of issuing an IO via submit_request.
		 */
		if (!rc) {
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		} else {
			SPDK_ERRLOG("Issue with decryption on bdev_io %p\n", bdev_io);
		}
		spdk_bdev_free_io(free_me);

	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		if (!rc) {
			/* Write the encrypted data. */
			rc = spdk_bdev_writev_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
						     &crypto_io->aux_buf_iov, 1,
						     crypto_io->aux_offset_blocks,
						     crypto_io->aux_num_blocks,
						     _complete_internal_write, bdev_io);
			if (rc == -ENOMEM) {
				vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_ENCRYPT_DONE);
				goto check_reset;
			}
		} else {
			SPDK_ERRLOG("Issue with encryption on bdev_io %p\n", bdev_io);
		}
	} else {
		SPDK_ERRLOG("Unknown bdev type %u on crypto operation completion\n", bdev_io->type);
		rc = -EINVAL;
	}

	if (rc) {
		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			spdk_bdev_io_put_aux_buf(bdev_io, crypto_io->aux_buf_raw);
		}
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}

check_reset:
	/* If the channel iter is not NULL, we need to wait
	 * until the pending list is empty, then we can move on to the
	 * next channel.
	 */
	if (crypto_ch->reset_iter && TAILQ_EMPTY(&crypto_ch->in_accel_fw)) {
		SPDK_NOTICELOG("Channel %p has been quiesced.\n", crypto_ch);
		spdk_for_each_channel_continue(crypto_ch->reset_iter, 0);
		crypto_ch->reset_iter = NULL;
	}
}

/* We're either encrypting on the way down or decrypting on the way back. */
static int
_crypto_operation(struct spdk_bdev_io *bdev_io, bool encrypt, void *aux_buf)
{
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
	uint32_t crypto_len = crypto_io->crypto_bdev->crypto_bdev.blocklen;
	uint64_t total_length;
	uint64_t alignment;
	int rc;

	/* For encryption, we need to prepare a single contiguous buffer as the encryption
	 * destination; we'll then pass that along for the write after encryption is done.
	 * This is done to avoid encrypting the provided write buffer, which may be
	 * undesirable in some use cases.
	 */
	if (encrypt) {
		total_length = bdev_io->u.bdev.num_blocks * crypto_len;
		alignment = spdk_bdev_get_buf_align(&crypto_io->crypto_bdev->crypto_bdev);
		crypto_io->aux_buf_iov.iov_len = total_length;
		crypto_io->aux_buf_raw = aux_buf;
		crypto_io->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) &
						  ~(alignment - 1));
		crypto_io->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
		crypto_io->aux_num_blocks = bdev_io->u.bdev.num_blocks;

		rc = spdk_accel_submit_encrypt(crypto_ch->accel_channel, crypto_ch->crypto_key,
					       &crypto_io->aux_buf_iov, 1,
					       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					       bdev_io->u.bdev.offset_blocks, crypto_len, 0,
					       _crypto_operation_complete, bdev_io);
	} else {
		rc = spdk_accel_submit_decrypt(crypto_ch->accel_channel, crypto_ch->crypto_key,
					       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					       bdev_io->u.bdev.offset_blocks, crypto_len, 0,
					       _crypto_operation_complete, bdev_io);
	}

	if (!rc) {
		TAILQ_INSERT_TAIL(&crypto_ch->in_accel_fw, bdev_io, module_link);
	}

	return rc;
}
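/* Worked example of the aux buffer alignment math in _crypto_operation() above,
 * with illustrative values: for alignment = 4096 and aux_buf = 0x200001234,
 * (0x200001234 + 0xfff) & ~0xfff = 0x200002000, i.e. the next 4 KiB boundary.
 * The unaligned raw pointer is kept in aux_buf_raw so that
 * spdk_bdev_io_put_aux_buf() can return exactly what the bdev layer handed us.
 */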
/* This function is called after all channels have been quiesced following
 * a bdev reset.
 */
static void
_ch_quiesce_done(struct spdk_io_channel_iter *i, int status)
{
	struct crypto_bdev_io *crypto_io = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(crypto_io);

	assert(TAILQ_EMPTY(&crypto_io->crypto_ch->in_accel_fw));

	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
}

static void
_ch_quiesce(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);

	if (TAILQ_EMPTY(&crypto_ch->in_accel_fw)) {
		spdk_for_each_channel_continue(i, 0);
	} else {
		/* In the accel completion callback we will see the non-NULL iter and handle the quiesce */
		crypto_ch->reset_iter = i;
	}
}
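/* Informational: the reset sequence used by this module. A reset is first passed
 * down to the base bdev (see vbdev_crypto_submit_request). When the base reset
 * completes, _complete_internal_io() iterates every channel with
 * spdk_for_each_channel(); _ch_quiesce() either continues immediately (channel
 * idle) or stashes the iterator in reset_iter so that _crypto_operation_complete()
 * can continue the walk once in_accel_fw drains. _ch_quiesce_done() then completes
 * the original reset IO.
 */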
/* Completion callback for IOs issued from this bdev other than reads/writes.
 * Those have their own callbacks for readability.
 */
static void
_complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

		spdk_bdev_free_io(bdev_io);

		spdk_for_each_channel(orig_ctx->crypto_bdev,
				      _ch_quiesce,
				      orig_ctx,
				      _ch_quiesce_done);
		return;
	}

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for writes that were issued from this bdev. */
static void
_complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;

	spdk_bdev_io_put_aux_buf(orig_io, orig_ctx->aux_buf_raw);

	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for reads that were issued from this bdev. */
static void
_complete_internal_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;
	int rc;

	if (success) {
		/* Save off this bdev_io so it can be freed after decryption. */
		orig_ctx->read_io = bdev_io;
		rc = _crypto_operation(orig_io, false, NULL);
		if (!rc) {
			return;
		} else if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			/* We will repeat the crypto operation later */
			vbdev_crypto_queue_io(orig_io, CRYPTO_IO_READ_DONE);
			return;
		} else {
			SPDK_ERRLOG("Failed to decrypt, rc %d\n", rc);
		}
	} else {
		SPDK_ERRLOG("Failed to read prior to decrypting!\n");
	}

	spdk_bdev_io_complete(orig_io, SPDK_BDEV_IO_STATUS_FAILED);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_crypto_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	struct spdk_io_channel *ch;

	switch (crypto_io->resubmit_state) {
	case CRYPTO_IO_NEW:
		assert(crypto_io->crypto_ch);
		ch = spdk_io_channel_from_ctx(crypto_io->crypto_ch);
		vbdev_crypto_submit_request(ch, bdev_io);
		break;
	case CRYPTO_IO_ENCRYPT_DONE:
		/* This IO was already removed from in_accel_fw before the encrypted write
		 * was queued for retry; put it back so the TAILQ_REMOVE in
		 * _crypto_operation_complete() stays balanced and cannot corrupt the list.
		 */
		TAILQ_INSERT_TAIL(&crypto_io->crypto_ch->in_accel_fw, bdev_io, module_link);
		_crypto_operation_complete(bdev_io, 0);
		break;
	case CRYPTO_IO_READ_DONE:
		_complete_internal_read(crypto_io->read_io, true, bdev_io);
		break;
	default:
		SPDK_UNREACHABLE();
	}
}

static void
vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io, enum crypto_io_resubmit_state state)
{
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc;

	crypto_io->bdev_io_wait.bdev = bdev_io->bdev;
	crypto_io->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io;
	crypto_io->bdev_io_wait.cb_arg = bdev_io;
	crypto_io->resubmit_state = state;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, crypto_io->crypto_ch->base_ch,
				     &crypto_io->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it.
 */
static void
crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		       bool success)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	rc = spdk_bdev_readv_blocks(crypto_bdev->base_desc, crypto_ch->base_ch, bdev_io->u.bdev.iovs,
				    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
				    bdev_io->u.bdev.num_blocks, _complete_internal_read,
				    bdev_io);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
		} else {
			SPDK_ERRLOG("Failed to submit bdev_io!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
/* For encryption we don't want to encrypt the data in place, as the host isn't
 * expecting us to mangle its data buffers, so we need to encrypt into the bdev
 * aux buffer. Then we can use that as the source for the disk data transfer.
 */
static void
crypto_write_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			void *aux_buf)
{
	int rc;

	if (spdk_unlikely(!aux_buf)) {
		SPDK_ERRLOG("Failed to get aux buffer!\n");
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	rc = _crypto_operation(bdev_io, true, aux_buf);
	if (rc != 0) {
		spdk_bdev_io_put_aux_buf(bdev_io, aux_buf);
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
		} else {
			SPDK_ERRLOG("Failed to submit crypto operation!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone submits IO to this crypto vbdev. For IOs not relevant to crypto,
 * we simply pass them on via SPDK IO calls, which in turn allocate another bdev IO
 * and invoke our completion callback (provided above) along with the original bdev_io,
 * so that we can complete it once this IO completes. For crypto operations, we either
 * encrypt first (writes) and then call back into bdev to submit the write, or we submit
 * a read and then catch it on the way back for decryption.
 */
static void
vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
					   crypto_bdev);
	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	memset(crypto_io, 0, sizeof(struct crypto_bdev_io));
	crypto_io->crypto_bdev = crypto_bdev;
	crypto_io->crypto_ch = crypto_ch;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		/* Tell the bdev layer that we need an aux buf in addition to the data
		 * buf already associated with the bdev.
		 */
		spdk_bdev_io_get_aux_buf(bdev_io, crypto_write_get_buf_cb);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch,
				     _complete_internal_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
			vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
		} else {
			SPDK_ERRLOG("Failed to submit bdev_io!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
/* We'll just call the base bdev and let it answer, except for the write-zeroes command,
 * which we always claim not to support so that the bdev layer will actually send us
 * real writes that we can encrypt.
 */
static bool
vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_FLUSH:
		return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type);
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* Force the bdev layer to issue actual writes of zeroes so we can
		 * encrypt them as regular writes.
		 */
	default:
		return false;
	}
}

/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_crypto *crypto_bdev = io_device;

	/* Done with this crypto_bdev. */
	crypto_bdev->opts = NULL;

	spdk_bdev_destruct_done(&crypto_bdev->crypto_bdev, 0);
	free(crypto_bdev->crypto_bdev.name);
	free(crypto_bdev);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_crypto_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called when our bdev is being unregistered, e.g. following a hot remove callback
 * or an RPC delete. Our finish entry point will be called during module teardown.
 */
static int
vbdev_crypto_destruct(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* Remove this device from the internal list */
	TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(crypto_bdev->base_bdev);

	/* Close the underlying bdev on the same thread on which it was opened. */
	if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) {
		spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc);
	} else {
		spdk_bdev_close(crypto_bdev->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(crypto_bdev, _device_unregister_cb);

	return 1;
}
/* We supplied this as an entry point for upper layers that want to communicate with this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our crypto vbdev in examine(), which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct, and we'll keep it in our crypto node.
 */
static struct spdk_io_channel *
vbdev_crypto_get_io_channel(void *ctx)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	/* The IO channel code will allocate a channel for us, which consists of
	 * the SPDK channel structure plus the size of our crypto_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	return spdk_get_io_channel(crypto_bdev);
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;

	spdk_json_write_name(w, "crypto");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
	spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
	spdk_json_write_object_end(w);

	return 0;
}

static int
vbdev_crypto_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_crypto *crypto_bdev;

	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_crypto_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
		spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}
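/* For reference, one entry produced by vbdev_crypto_config_json() has this shape
 * (the values below are illustrative, not taken from a real configuration):
 *
 *	{
 *	  "method": "bdev_crypto_create",
 *	  "params": {
 *	    "base_bdev_name": "Nvme0n1",
 *	    "name": "crypto0",
 *	    "key_name": "key0"
 *	  }
 *	}
 */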
/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per-channel basis. We also get an
 * accel framework channel used to submit crypto operations.
 */
static int
crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;
	struct vbdev_crypto *crypto_bdev = io_device;

	crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
	crypto_ch->accel_channel = spdk_accel_get_io_channel();
	crypto_ch->crypto_key = crypto_bdev->opts->key;

	/* We use this queue to track outstanding IO in our layer. */
	TAILQ_INIT(&crypto_ch->in_accel_fw);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created.
 */
static void
crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct crypto_io_channel *crypto_ch = ctx_buf;

	spdk_put_io_channel(crypto_ch->base_ch);
	spdk_put_io_channel(crypto_ch->accel_channel);
}

/* Create the association between the bdev and vbdev names and insert
 * it on the global list.
 */
static int
vbdev_crypto_insert_name(struct vbdev_crypto_opts *opts, struct bdev_names **out)
{
	struct bdev_names *name;

	assert(opts);
	assert(out);

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(opts->vbdev_name, name->opts->vbdev_name) == 0) {
			SPDK_ERRLOG("Crypto bdev %s already exists\n", opts->vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("Failed to allocate memory for bdev_names.\n");
		return -ENOMEM;
	}

	name->opts = opts;
	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);
	*out = name;

	return 0;
}

void
free_crypto_opts(struct vbdev_crypto_opts *opts)
{
	free(opts->bdev_name);
	free(opts->vbdev_name);
	free(opts);
}

static void
vbdev_crypto_delete_name(struct bdev_names *name)
{
	TAILQ_REMOVE(&g_bdev_names, name, link);
	if (name->opts) {
		if (name->opts->key_owner && name->opts->key) {
			spdk_accel_crypto_key_destroy(name->opts->key);
		}
		free_crypto_opts(name->opts);
		name->opts = NULL;
	}
	free(name);
}

/* RPC entry point for crypto creation. */
int
create_crypto_disk(struct vbdev_crypto_opts *opts)
{
	struct bdev_names *name = NULL;
	int rc;

	rc = vbdev_crypto_insert_name(opts, &name);
	if (rc) {
		return rc;
	}

	rc = vbdev_crypto_claim(opts->bdev_name);
	if (rc == -ENODEV) {
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	if (rc) {
		assert(name != NULL);
		/* On error we let the caller deallocate @opts, since that is its
		 * responsibility. Setting name->opts = NULL lets
		 * vbdev_crypto_delete_name() know it does not have to do anything
		 * about @opts.
		 */
		name->opts = NULL;
		vbdev_crypto_delete_name(name);
	}
	return rc;
}
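/* A minimal caller-side sketch of create_crypto_disk(). The opts are normally built
 * by the RPC layer; the names below are hypothetical, and the key is assumed to
 * already be registered with the accel framework (looked up here by name via
 * spdk_accel_crypto_key_get()):
 *
 *	struct vbdev_crypto_opts *opts = calloc(1, sizeof(*opts));
 *
 *	opts->bdev_name = strdup("Nvme0n1");
 *	opts->vbdev_name = strdup("crypto0");
 *	opts->key = spdk_accel_crypto_key_get("key0");
 *	opts->key_owner = false;	// this module did not create the key
 *	if (create_crypto_disk(opts) != 0) {
 *		free_crypto_opts(opts);	// on failure, @opts remains owned by the caller
 *	}
 */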
/* Called at module initialization time. There is nothing to do here: crypto vbdevs
 * are created via RPC (or JSON config replay) and bound to their base bdevs in the
 * examine callback.
 */
static int
vbdev_crypto_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_crypto_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		vbdev_crypto_delete_name(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_crypto_get_ctx_size(void)
{
	return sizeof(struct crypto_bdev_io);
}

static void
vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_crypto *crypto_bdev, *tmp;

	TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) {
		if (bdev_find == crypto_bdev->base_bdev) {
			spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event such as bdev removal. */
static void
vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_crypto_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = {
	.destruct		= vbdev_crypto_destruct,
	.submit_request		= vbdev_crypto_submit_request,
	.io_type_supported	= vbdev_crypto_io_type_supported,
	.get_io_channel		= vbdev_crypto_get_io_channel,
	.dump_info_json		= vbdev_crypto_dump_info_json,
};

static struct spdk_bdev_module crypto_if = {
	.name		= "crypto",
	.module_init	= vbdev_crypto_init,
	.get_ctx_size	= vbdev_crypto_get_ctx_size,
	.examine_config	= vbdev_crypto_examine,
	.module_fini	= vbdev_crypto_finish,
	.config_json	= vbdev_crypto_config_json
};

SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if)

static int
vbdev_crypto_claim(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_crypto *vbdev;
	struct spdk_bdev *bdev;
	struct spdk_iobuf_opts iobuf_opts;
	int rc = 0;

	/* Limit the max IO size to a reasonable value. Since the write path uses an
	 * aux buffer, base the limit on the iobuf large_bufsize value. */
	spdk_iobuf_get_opts(&iobuf_opts);

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the crypto_bdev & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->opts->bdev_name, bdev_name) != 0) {
			continue;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name);

		vbdev = calloc(1, sizeof(struct vbdev_crypto));
		if (!vbdev) {
			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev.\n");
			return -ENOMEM;
		}
		vbdev->crypto_bdev.product_name = "crypto";

		vbdev->crypto_bdev.name = strdup(name->opts->vbdev_name);
		if (!vbdev->crypto_bdev.name) {
			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev name.\n");
			rc = -ENOMEM;
			goto error_bdev_name;
		}

		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb,
					NULL, &vbdev->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("Failed to open bdev %s: error %d\n", bdev_name, rc);
			}
			goto error_open;
		}

		bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc);
		vbdev->base_bdev = bdev;

		vbdev->crypto_bdev.write_cache = bdev->write_cache;
		if (bdev->optimal_io_boundary > 0) {
			vbdev->crypto_bdev.optimal_io_boundary =
				spdk_min((iobuf_opts.large_bufsize / bdev->blocklen), bdev->optimal_io_boundary);
		} else {
			vbdev->crypto_bdev.optimal_io_boundary = (iobuf_opts.large_bufsize / bdev->blocklen);
		}
		vbdev->crypto_bdev.split_on_optimal_io_boundary = true;
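		/* Illustrative numbers for the boundary math above: with a (hypothetical)
		 * iobuf large_bufsize of 64 KiB and a 512-byte base blocklen, the vbdev
		 * splits on a 128-block boundary, so each split write fits within a
		 * single aux buffer.
		 */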
		if (bdev->required_alignment > 0) {
			vbdev->crypto_bdev.required_alignment = bdev->required_alignment;
		} else {
			/* Some accel modules may not support SGL input or output. If such a module
			 * works with physical addresses, an unaligned buffer may cross a huge page
			 * boundary, which leads to a scattered payload. To avoid such cases, set
			 * required_alignment to the block size. */
			vbdev->crypto_bdev.required_alignment = spdk_u32log2(bdev->blocklen);
		}
		vbdev->crypto_bdev.blocklen = bdev->blocklen;
		vbdev->crypto_bdev.blockcnt = bdev->blockcnt;

		/* This is the context that is passed to us when the bdev
		 * layer calls in, so we'll save our crypto_bdev node here.
		 */
		vbdev->crypto_bdev.ctxt = vbdev;
		vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table;
		vbdev->crypto_bdev.module = &crypto_if;

		/* Assign crypto opts from the name. The pointer is valid up to the point
		 * the module is unloaded and all names removed from the list. */
		vbdev->opts = name->opts;

		TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link);

		spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb,
					sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name);

		/* Save the thread where the base device is opened */
		vbdev->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module);
		if (rc) {
			SPDK_ERRLOG("Failed to claim bdev %s\n", spdk_bdev_get_name(bdev));
			goto error_claim;
		}

		rc = spdk_bdev_register(&vbdev->crypto_bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to register vbdev: error %d\n", rc);
			rc = -EINVAL;
			goto error_bdev_register;
		}
		SPDK_DEBUGLOG(vbdev_crypto, "Registered io_device and virtual bdev for: %s\n",
			      vbdev->opts->vbdev_name);
		break;
	}

	return rc;

	/* Error cleanup paths. */
error_bdev_register:
	spdk_bdev_module_release_bdev(vbdev->base_bdev);
error_claim:
	TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link);
	spdk_io_device_unregister(vbdev, NULL);
	spdk_bdev_close(vbdev->base_desc);
error_open:
	free(vbdev->crypto_bdev.name);
error_bdev_name:
	free(vbdev);

	return rc;
}

struct crypto_delete_disk_ctx {
	spdk_delete_crypto_complete cb_fn;
	void *cb_arg;
	char *bdev_name;
};

static void
delete_crypto_disk_bdev_name(void *ctx, int rc)
{
	struct bdev_names *name;
	struct crypto_delete_disk_ctx *disk_ctx = ctx;

	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
	 * vbdev does not get re-created if the same bdev is constructed at some other time,
	 * unless the underlying bdev was hot-removed. */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->opts->vbdev_name, disk_ctx->bdev_name) == 0) {
			vbdev_crypto_delete_name(name);
			break;
		}
	}

	disk_ctx->cb_fn(disk_ctx->cb_arg, rc);

	free(disk_ctx->bdev_name);
	free(disk_ctx);
}

/* RPC entry point for deleting a crypto vbdev. */
void
delete_crypto_disk(const char *bdev_name, spdk_delete_crypto_complete cb_fn,
		   void *cb_arg)
{
	int rc;
	struct crypto_delete_disk_ctx *ctx;

	ctx = calloc(1, sizeof(struct crypto_delete_disk_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate delete crypto disk ctx\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bdev_name = strdup(bdev_name);
	if (!ctx->bdev_name) {
		SPDK_ERRLOG("Failed to copy bdev_name\n");
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->cb_arg = cb_arg;
	ctx->cb_fn = cb_fn;
	/* Some cleanup happens in the destruct callback. */
	rc = spdk_bdev_unregister_by_name(bdev_name, &crypto_if, delete_crypto_disk_bdev_name, ctx);
	if (rc != 0) {
		SPDK_ERRLOG("Encountered an error during bdev unregistration\n");
		cb_fn(cb_arg, rc);
		free(ctx->bdev_name);
		free(ctx);
	}
}

/* Because we registered this function as our module's examine_config callback, we'll
 * get a call here anytime a new bdev shows up. Here we need to decide if we care
 * about it and, if so, what to do. We check the new bdev against the list of names
 * built up from configuration, and if the user configured us to attach to this
 * bdev, here's where we do it.
 */
static void
vbdev_crypto_examine(struct spdk_bdev *bdev)
{
	vbdev_crypto_claim(spdk_bdev_get_name(bdev));
	spdk_bdev_module_examine_done(&crypto_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto)