xref: /spdk/module/bdev/crypto/vbdev_crypto.c (revision ed43989a675ad1c69eeaf607951fff3c0f0414fe)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2018 Intel Corporation.
307fe6a43SSeth Howell  *   All rights reserved.
4b30c1383SAlexey Marchuk  *   Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES.
545f24aebSYuriy Umanets  *   All rights reserved.
607fe6a43SSeth Howell  */
707fe6a43SSeth Howell 
807fe6a43SSeth Howell #include "vbdev_crypto.h"
907fe6a43SSeth Howell 
1013f97e67SAlexey Marchuk #include "spdk_internal/assert.h"
118e05b15cSDarek Stojaczyk #include "spdk/thread.h"
1207fe6a43SSeth Howell #include "spdk/bdev_module.h"
1313f97e67SAlexey Marchuk #include "spdk/likely.h"
1407fe6a43SSeth Howell 
152c09c37cSKrzysztof Karas /* This namespace UUID was generated using the uuid_generate() method. */
162c09c37cSKrzysztof Karas #define BDEV_CRYPTO_NAMESPACE_UUID "078e3cf7-f4b4-4545-b2c3-d40045a64ae2"
172c09c37cSKrzysztof Karas 
1807fe6a43SSeth Howell struct bdev_names {
195ba9b78eSYuriy Umanets 	struct vbdev_crypto_opts	*opts;
2007fe6a43SSeth Howell 	TAILQ_ENTRY(bdev_names)		link;
2107fe6a43SSeth Howell };
225ba9b78eSYuriy Umanets 
235ba9b78eSYuriy Umanets /* List of crypto_bdev names and their base bdevs, built up via RPC. */
2407fe6a43SSeth Howell static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);
2507fe6a43SSeth Howell 
2607fe6a43SSeth Howell struct vbdev_crypto {
2707fe6a43SSeth Howell 	struct spdk_bdev		*base_bdev;		/* the thing we're attaching to */
2807fe6a43SSeth Howell 	struct spdk_bdev_desc		*base_desc;		/* its descriptor we get from open */
2907fe6a43SSeth Howell 	struct spdk_bdev		crypto_bdev;		/* the crypto virtual bdev */
3013f97e67SAlexey Marchuk 	struct vbdev_crypto_opts	*opts;			/* crypto options such as names and DEK */
3107fe6a43SSeth Howell 	TAILQ_ENTRY(vbdev_crypto)	link;
32b3be320dSGangCao 	struct spdk_thread		*thread;		/* thread where base device is opened */
3307fe6a43SSeth Howell };
345ba9b78eSYuriy Umanets 
355ba9b78eSYuriy Umanets /* List of virtual bdevs and associated info for each. We keep the device friendly name here even
365ba9b78eSYuriy Umanets  * though it's also in the device struct because we use it early on.
375ba9b78eSYuriy Umanets  */
3807fe6a43SSeth Howell static TAILQ_HEAD(, vbdev_crypto) g_vbdev_crypto = TAILQ_HEAD_INITIALIZER(g_vbdev_crypto);
3907fe6a43SSeth Howell 
4007fe6a43SSeth Howell /* The crypto vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
4113f97e67SAlexey Marchuk  * We store things in here that are needed on a per-thread basis, like the base_channel for this thread.
4207fe6a43SSeth Howell  */
4307fe6a43SSeth Howell struct crypto_io_channel {
4407fe6a43SSeth Howell 	struct spdk_io_channel		*base_ch;	/* IO channel of base device */
4513f97e67SAlexey Marchuk 	struct spdk_io_channel		*accel_channel;	/* Accel engine channel used for crypto ops */
4613f97e67SAlexey Marchuk 	struct spdk_accel_crypto_key	*crypto_key;
4713f97e67SAlexey Marchuk };
4813f97e67SAlexey Marchuk 
4913f97e67SAlexey Marchuk enum crypto_io_resubmit_state {
505d860c18SKonrad Sztyber 	CRYPTO_IO_DECRYPT_DONE,	/* Appended decrypt, need to read */
5113f97e67SAlexey Marchuk 	CRYPTO_IO_ENCRYPT_DONE,	/* Need to write */
5207fe6a43SSeth Howell };
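/* These states record which step to resume from when an IO that hit -ENOMEM is queued
 * with spdk_bdev_queue_io_wait() and later re-driven by vbdev_crypto_resubmit_io(): the
 * accel encrypt/decrypt has already been appended to the sequence, only the base bdev
 * write/read still needs to be submitted.
 */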
5307fe6a43SSeth Howell 
5407fe6a43SSeth Howell /* This is the crypto per IO context that the bdev layer allocates for us opaquely and attaches to
5507fe6a43SSeth Howell  * each IO for us.
5607fe6a43SSeth Howell  */
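/* The size of this context is reported to the bdev layer via vbdev_crypto_get_ctx_size()
 * below; the bdev layer reserves that much space in each bdev_io's driver_ctx for us.
 */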
5707fe6a43SSeth Howell struct crypto_bdev_io {
5807fe6a43SSeth Howell 	struct crypto_io_channel *crypto_ch;		/* need to store for crypto completion handling */
5907fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev;		/* the crypto node struct associated with this IO */
6088e48baaSpaul luse 	/* Used for the single contiguous buffer that serves as the crypto destination target for writes */
612c8ddd08SShuhei Matsumoto 	uint64_t aux_num_blocks;			/* num of blocks for the contiguous buffer */
622c8ddd08SShuhei Matsumoto 	uint64_t aux_offset_blocks;			/* block offset on media */
632c8ddd08SShuhei Matsumoto 	void *aux_buf_raw;				/* raw buffer that the bdev layer gave us for write buffer */
642c8ddd08SShuhei Matsumoto 	struct iovec aux_buf_iov;			/* iov representing aligned contig write buffer */
6585cf8d27SKonrad Sztyber 	struct spdk_memory_domain *aux_domain;		/* memory domain of the aux buf */
6685cf8d27SKonrad Sztyber 	void *aux_domain_ctx;				/* memory domain ctx of the aux buf */
675d860c18SKonrad Sztyber 	struct spdk_accel_sequence *seq;		/* sequence of accel operations */
6807fe6a43SSeth Howell 
6907fe6a43SSeth Howell 	/* for bdev_io_wait */
7007fe6a43SSeth Howell 	struct spdk_bdev_io_wait_entry bdev_io_wait;
7113f97e67SAlexey Marchuk 	enum crypto_io_resubmit_state resubmit_state;
7207fe6a43SSeth Howell };
7307fe6a43SSeth Howell 
7413f97e67SAlexey Marchuk static void vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io,
7513f97e67SAlexey Marchuk 				  enum crypto_io_resubmit_state state);
7613f97e67SAlexey Marchuk static void _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
7713f97e67SAlexey Marchuk static void vbdev_crypto_examine(struct spdk_bdev *bdev);
7813f97e67SAlexey Marchuk static int vbdev_crypto_claim(const char *bdev_name);
7913f97e67SAlexey Marchuk static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
8007fe6a43SSeth Howell 
8107fe6a43SSeth Howell static void
8212492cb9SKonrad Sztyber crypto_io_fail(struct crypto_bdev_io *crypto_io)
8312492cb9SKonrad Sztyber {
8412492cb9SKonrad Sztyber 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(crypto_io);
85d7b2f5b9SAlexey Marchuk 	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
86d7b2f5b9SAlexey Marchuk 
87d7b2f5b9SAlexey Marchuk 	if (crypto_io->aux_buf_raw) {
88d7b2f5b9SAlexey Marchuk 		spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
89d7b2f5b9SAlexey Marchuk 				   crypto_io->aux_domain, crypto_io->aux_domain_ctx);
90d7b2f5b9SAlexey Marchuk 	}
9112492cb9SKonrad Sztyber 
9212492cb9SKonrad Sztyber 	/* This function can only be used to fail an IO that hasn't been sent to the base bdev,
9312492cb9SKonrad Sztyber 	 * otherwise the accel sequence might have already been executed/aborted. */
9412492cb9SKonrad Sztyber 	spdk_accel_sequence_abort(crypto_io->seq);
9512492cb9SKonrad Sztyber 	spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
9612492cb9SKonrad Sztyber }
9712492cb9SKonrad Sztyber 
9812492cb9SKonrad Sztyber static void
9985cf8d27SKonrad Sztyber crypto_write(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
10007fe6a43SSeth Howell {
10107fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
10207fe6a43SSeth Howell 					   crypto_bdev);
10313f97e67SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
10485cf8d27SKonrad Sztyber 	struct spdk_bdev_ext_io_opts opts = {};
10585cf8d27SKonrad Sztyber 	int rc;
10607fe6a43SSeth Howell 
10785cf8d27SKonrad Sztyber 	opts.size = sizeof(opts);
10885cf8d27SKonrad Sztyber 	opts.accel_sequence = crypto_io->seq;
10985cf8d27SKonrad Sztyber 	opts.memory_domain = crypto_io->aux_domain;
11085cf8d27SKonrad Sztyber 	opts.memory_domain_ctx = crypto_io->aux_domain_ctx;
11107fe6a43SSeth Howell 
11207fe6a43SSeth Howell 	/* Write the encrypted data. */
11385cf8d27SKonrad Sztyber 	rc = spdk_bdev_writev_blocks_ext(crypto_bdev->base_desc, crypto_ch->base_ch,
11413f97e67SAlexey Marchuk 					 &crypto_io->aux_buf_iov, 1, crypto_io->aux_offset_blocks,
115454ee6beSKonrad Sztyber 					 crypto_io->aux_num_blocks, _complete_internal_io,
11685cf8d27SKonrad Sztyber 					 bdev_io, &opts);
11785cf8d27SKonrad Sztyber 	if (spdk_unlikely(rc != 0)) {
11813f97e67SAlexey Marchuk 		if (rc == -ENOMEM) {
11985cf8d27SKonrad Sztyber 			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
12013f97e67SAlexey Marchuk 			vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_ENCRYPT_DONE);
12107fe6a43SSeth Howell 		} else {
12285cf8d27SKonrad Sztyber 			SPDK_ERRLOG("Failed to submit bdev_io!\n");
12312492cb9SKonrad Sztyber 			crypto_io_fail(crypto_io);
12407fe6a43SSeth Howell 		}
12507fe6a43SSeth Howell 	}
1269afa85b5SYuriy Umanets }
1279afa85b5SYuriy Umanets 
12807fe6a43SSeth Howell /* We're either encrypting on the way down or decrypting on the way back. */
12985cf8d27SKonrad Sztyber static void
13085cf8d27SKonrad Sztyber crypto_encrypt(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
13107fe6a43SSeth Howell {
13213f97e67SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
1337e7d5f53SJacek Kalwas 	uint32_t blocklen = crypto_io->crypto_bdev->crypto_bdev.blocklen;
13413f97e67SAlexey Marchuk 	uint64_t total_length;
13513f97e67SAlexey Marchuk 	uint64_t alignment;
13685cf8d27SKonrad Sztyber 	void *aux_buf = crypto_io->aux_buf_raw;
13707fe6a43SSeth Howell 	int rc;
13807fe6a43SSeth Howell 
13907fe6a43SSeth Howell 	/* For encryption, we need to prepare a single contiguous buffer as the encryption
14007fe6a43SSeth Howell 	 * destination; we'll then pass that along for the write after encryption is done.
14107fe6a43SSeth Howell 	 * This is done to avoid encrypting the provided write buffer, which may be
14207fe6a43SSeth Howell 	 * undesirable in some use cases.
14307fe6a43SSeth Howell 	 */
1447e7d5f53SJacek Kalwas 	total_length = bdev_io->u.bdev.num_blocks * blocklen;
14513f97e67SAlexey Marchuk 	alignment = spdk_bdev_get_buf_align(&crypto_io->crypto_bdev->crypto_bdev);
14613f97e67SAlexey Marchuk 	crypto_io->aux_buf_iov.iov_len = total_length;
14713f97e67SAlexey Marchuk 	crypto_io->aux_buf_iov.iov_base  = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~
14813f97e67SAlexey Marchuk 					   (alignment - 1));
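	/* Align-up example with hypothetical numbers: if aux_buf is 0x10000010 and the required
	 * alignment is 0x200 (512 bytes), then (0x10000010 + 0x1ff) & ~0x1ff == 0x10000200, i.e.
	 * the first 512-byte aligned address at or above aux_buf.
	 */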
14913f97e67SAlexey Marchuk 	crypto_io->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
15013f97e67SAlexey Marchuk 	crypto_io->aux_num_blocks = bdev_io->u.bdev.num_blocks;
15107fe6a43SSeth Howell 
15285cf8d27SKonrad Sztyber 	rc = spdk_accel_append_encrypt(&crypto_io->seq, crypto_ch->accel_channel,
15385cf8d27SKonrad Sztyber 				       crypto_ch->crypto_key, &crypto_io->aux_buf_iov, 1,
15485cf8d27SKonrad Sztyber 				       crypto_io->aux_domain, crypto_io->aux_domain_ctx,
15543cf39beSKonrad Sztyber 				       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
15643cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain,
15743cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain_ctx,
15879e2a56fSKonrad Sztyber 				       bdev_io->u.bdev.offset_blocks, blocklen,
159d7b2f5b9SAlexey Marchuk 				       NULL, NULL);
16085cf8d27SKonrad Sztyber 	if (spdk_unlikely(rc != 0)) {
16185cf8d27SKonrad Sztyber 		spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
16285cf8d27SKonrad Sztyber 				   crypto_io->aux_domain, crypto_io->aux_domain_ctx);
16385cf8d27SKonrad Sztyber 		if (rc == -ENOMEM) {
16485cf8d27SKonrad Sztyber 			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
1653824f6e3SKonrad Sztyber 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
16685cf8d27SKonrad Sztyber 		} else {
16785cf8d27SKonrad Sztyber 			SPDK_ERRLOG("Failed to submit bdev_io!\n");
16812492cb9SKonrad Sztyber 			crypto_io_fail(crypto_io);
169c602bd81Spaul luse 		}
17007fe6a43SSeth Howell 
17185cf8d27SKonrad Sztyber 		return;
17285cf8d27SKonrad Sztyber 	}
17385cf8d27SKonrad Sztyber 
17485cf8d27SKonrad Sztyber 	crypto_write(crypto_ch, bdev_io);
17507fe6a43SSeth Howell }
17607fe6a43SSeth Howell 
17707fe6a43SSeth Howell static void
17807fe6a43SSeth Howell _complete_internal_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
17907fe6a43SSeth Howell {
18007fe6a43SSeth Howell 	struct spdk_bdev_io *orig_io = cb_arg;
181d7b2f5b9SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)orig_io->driver_ctx;
182d7b2f5b9SAlexey Marchuk 	struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
18307fe6a43SSeth Howell 
184d7b2f5b9SAlexey Marchuk 	if (crypto_io->aux_buf_raw) {
185d7b2f5b9SAlexey Marchuk 		spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
186d7b2f5b9SAlexey Marchuk 				   crypto_io->aux_domain, crypto_io->aux_domain_ctx);
187d7b2f5b9SAlexey Marchuk 	}
188d7b2f5b9SAlexey Marchuk 
1898be08e43SKonrad Sztyber 	spdk_bdev_io_complete_base_io_status(orig_io, bdev_io);
19007fe6a43SSeth Howell 	spdk_bdev_free_io(bdev_io);
19107fe6a43SSeth Howell }
19207fe6a43SSeth Howell 
1935d860c18SKonrad Sztyber static void crypto_read(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io);
1945d860c18SKonrad Sztyber 
19507fe6a43SSeth Howell static void
19607fe6a43SSeth Howell vbdev_crypto_resubmit_io(void *arg)
19707fe6a43SSeth Howell {
19807fe6a43SSeth Howell 	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
19913f97e67SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
20007fe6a43SSeth Howell 
20113f97e67SAlexey Marchuk 	switch (crypto_io->resubmit_state) {
20213f97e67SAlexey Marchuk 	case CRYPTO_IO_ENCRYPT_DONE:
20385cf8d27SKonrad Sztyber 		crypto_write(crypto_io->crypto_ch, bdev_io);
20413f97e67SAlexey Marchuk 		break;
2055d860c18SKonrad Sztyber 	case CRYPTO_IO_DECRYPT_DONE:
2065d860c18SKonrad Sztyber 		crypto_read(crypto_io->crypto_ch, bdev_io);
20713f97e67SAlexey Marchuk 		break;
20813f97e67SAlexey Marchuk 	default:
20913f97e67SAlexey Marchuk 		SPDK_UNREACHABLE();
21013f97e67SAlexey Marchuk 	}
21107fe6a43SSeth Howell }
21207fe6a43SSeth Howell 
21307fe6a43SSeth Howell static void
21413f97e67SAlexey Marchuk vbdev_crypto_queue_io(struct spdk_bdev_io *bdev_io, enum crypto_io_resubmit_state state)
21507fe6a43SSeth Howell {
21613f97e67SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
21707fe6a43SSeth Howell 	int rc;
21807fe6a43SSeth Howell 
21913f97e67SAlexey Marchuk 	crypto_io->bdev_io_wait.bdev = bdev_io->bdev;
22013f97e67SAlexey Marchuk 	crypto_io->bdev_io_wait.cb_fn = vbdev_crypto_resubmit_io;
22113f97e67SAlexey Marchuk 	crypto_io->bdev_io_wait.cb_arg = bdev_io;
22213f97e67SAlexey Marchuk 	crypto_io->resubmit_state = state;
22307fe6a43SSeth Howell 
22413f97e67SAlexey Marchuk 	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, crypto_io->crypto_ch->base_ch,
22513f97e67SAlexey Marchuk 				     &crypto_io->bdev_io_wait);
22607fe6a43SSeth Howell 	if (rc != 0) {
22707fe6a43SSeth Howell 		SPDK_ERRLOG("Queue io failed in vbdev_crypto_queue_io, rc=%d.\n", rc);
22812492cb9SKonrad Sztyber 		crypto_io_fail(crypto_io);
22907fe6a43SSeth Howell 	}
23007fe6a43SSeth Howell }
23107fe6a43SSeth Howell 
2325d860c18SKonrad Sztyber static void
2335d860c18SKonrad Sztyber crypto_read(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
2345d860c18SKonrad Sztyber {
2355d860c18SKonrad Sztyber 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
2365d860c18SKonrad Sztyber 	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
2375d860c18SKonrad Sztyber 					   crypto_bdev);
2385d860c18SKonrad Sztyber 	struct spdk_bdev_ext_io_opts opts = {};
2395d860c18SKonrad Sztyber 	int rc;
2405d860c18SKonrad Sztyber 
2415d860c18SKonrad Sztyber 	opts.size = sizeof(opts);
2425d860c18SKonrad Sztyber 	opts.accel_sequence = crypto_io->seq;
24343cf39beSKonrad Sztyber 	opts.memory_domain = bdev_io->u.bdev.memory_domain;
24443cf39beSKonrad Sztyber 	opts.memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
2455d860c18SKonrad Sztyber 
2465d860c18SKonrad Sztyber 	rc = spdk_bdev_readv_blocks_ext(crypto_bdev->base_desc, crypto_ch->base_ch,
2475d860c18SKonrad Sztyber 					bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
2485d860c18SKonrad Sztyber 					bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
249454ee6beSKonrad Sztyber 					_complete_internal_io, bdev_io, &opts);
2505d860c18SKonrad Sztyber 	if (rc != 0) {
2515d860c18SKonrad Sztyber 		if (rc == -ENOMEM) {
2525d860c18SKonrad Sztyber 			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
2535d860c18SKonrad Sztyber 			vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_DECRYPT_DONE);
2545d860c18SKonrad Sztyber 		} else {
2555d860c18SKonrad Sztyber 			SPDK_ERRLOG("Failed to submit bdev_io!\n");
25612492cb9SKonrad Sztyber 			crypto_io_fail(crypto_io);
2575d860c18SKonrad Sztyber 		}
2585d860c18SKonrad Sztyber 	}
2595d860c18SKonrad Sztyber }
2605d860c18SKonrad Sztyber 
26107fe6a43SSeth Howell /* Callback for getting a buf from the bdev pool in the event that the caller passed
26207fe6a43SSeth Howell  * in NULL; we need to own the buffer so it doesn't get freed by another vbdev module
26307fe6a43SSeth Howell  * beneath us before we're done with it.
26407fe6a43SSeth Howell  */
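/* Unlike the write path, decryption is done in place: the appended decrypt below uses the
 * same iovs (and memory domain) as both source and destination, so the data ends up
 * decrypted directly in the caller's buffer once the read completes.
 */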
26507fe6a43SSeth Howell static void
26607fe6a43SSeth Howell crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
26707fe6a43SSeth Howell 		       bool success)
26807fe6a43SSeth Howell {
26907fe6a43SSeth Howell 	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
2705d860c18SKonrad Sztyber 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
2715d860c18SKonrad Sztyber 	uint32_t blocklen = crypto_io->crypto_bdev->crypto_bdev.blocklen;
27207fe6a43SSeth Howell 	int rc;
27307fe6a43SSeth Howell 
27407fe6a43SSeth Howell 	if (!success) {
27512492cb9SKonrad Sztyber 		crypto_io_fail(crypto_io);
27607fe6a43SSeth Howell 		return;
27707fe6a43SSeth Howell 	}
27807fe6a43SSeth Howell 
2795d860c18SKonrad Sztyber 	rc = spdk_accel_append_decrypt(&crypto_io->seq, crypto_ch->accel_channel,
28043cf39beSKonrad Sztyber 				       crypto_ch->crypto_key,
28143cf39beSKonrad Sztyber 				       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
28243cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain,
28343cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain_ctx,
28443cf39beSKonrad Sztyber 				       bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
28543cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain,
28643cf39beSKonrad Sztyber 				       bdev_io->u.bdev.memory_domain_ctx,
28779e2a56fSKonrad Sztyber 				       bdev_io->u.bdev.offset_blocks, blocklen,
2885d860c18SKonrad Sztyber 				       NULL, NULL);
28907fe6a43SSeth Howell 	if (rc != 0) {
29007fe6a43SSeth Howell 		if (rc == -ENOMEM) {
2912172c432STomasz Zawadzki 			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
2923824f6e3SKonrad Sztyber 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
29307fe6a43SSeth Howell 		} else {
2944a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to submit bdev_io!\n");
29512492cb9SKonrad Sztyber 			crypto_io_fail(crypto_io);
29607fe6a43SSeth Howell 		}
2975d860c18SKonrad Sztyber 
2985d860c18SKonrad Sztyber 		return;
29907fe6a43SSeth Howell 	}
3005d860c18SKonrad Sztyber 
3015d860c18SKonrad Sztyber 	crypto_read(crypto_ch, bdev_io);
30207fe6a43SSeth Howell }
30307fe6a43SSeth Howell 
30407fe6a43SSeth Howell /* Called when someone submits IO to this crypto vbdev. For IOs not relevant to crypto,
30507fe6a43SSeth Howell  * we simply pass them on here via SPDK IO calls, which in turn allocate another bdev IO
30607fe6a43SSeth Howell  * and call our cpl callback provided below along with the original bdev_io so that we can
30707fe6a43SSeth Howell  * complete it once this IO completes. For crypto operations, we'll either encrypt it first
30807fe6a43SSeth Howell  * (writes) then call back into bdev to submit it or we'll submit a read and then catch it
30907fe6a43SSeth Howell  * on the way back for decryption.
31007fe6a43SSeth Howell  */
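/* In short, both crypto paths ride on a single accel sequence that is handed to the base
 * bdev via spdk_bdev_ext_io_opts.accel_sequence:
 *   write: spdk_accel_get_buf() -> spdk_accel_append_encrypt() into the aux buffer ->
 *          spdk_bdev_writev_blocks_ext() of the aux buffer
 *   read:  spdk_bdev_io_get_buf() -> spdk_accel_append_decrypt() in place ->
 *          spdk_bdev_readv_blocks_ext(); the decrypt executes once the read data is available
 */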
31107fe6a43SSeth Howell static void
31207fe6a43SSeth Howell vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
31307fe6a43SSeth Howell {
31407fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
31507fe6a43SSeth Howell 					   crypto_bdev);
31607fe6a43SSeth Howell 	struct crypto_io_channel *crypto_ch = spdk_io_channel_get_ctx(ch);
31713f97e67SAlexey Marchuk 	struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
31807fe6a43SSeth Howell 	int rc = 0;
31907fe6a43SSeth Howell 
32013f97e67SAlexey Marchuk 	memset(crypto_io, 0, sizeof(struct crypto_bdev_io));
32113f97e67SAlexey Marchuk 	crypto_io->crypto_bdev = crypto_bdev;
32213f97e67SAlexey Marchuk 	crypto_io->crypto_ch = crypto_ch;
3235d860c18SKonrad Sztyber 	crypto_io->seq = bdev_io->u.bdev.accel_sequence;
32407fe6a43SSeth Howell 
32507fe6a43SSeth Howell 	switch (bdev_io->type) {
32607fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_READ:
32707fe6a43SSeth Howell 		spdk_bdev_io_get_buf(bdev_io, crypto_read_get_buf_cb,
32807fe6a43SSeth Howell 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
32907fe6a43SSeth Howell 		break;
33007fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_WRITE:
33185cf8d27SKonrad Sztyber 		/* For encryption we don't want to encrypt the data in place as the host isn't
33285cf8d27SKonrad Sztyber 		 * expecting us to mangle its data buffers, so we need to encrypt into the aux accel
33385cf8d27SKonrad Sztyber 		 * buffer and then use that as the source for the disk data transfer.
3342c8ddd08SShuhei Matsumoto 		 */
33585cf8d27SKonrad Sztyber 		rc = spdk_accel_get_buf(crypto_ch->accel_channel,
33685cf8d27SKonrad Sztyber 					bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen,
33785cf8d27SKonrad Sztyber 					&crypto_io->aux_buf_raw, &crypto_io->aux_domain,
33885cf8d27SKonrad Sztyber 					&crypto_io->aux_domain_ctx);
33985cf8d27SKonrad Sztyber 		if (rc == 0) {
34085cf8d27SKonrad Sztyber 			crypto_encrypt(crypto_ch, bdev_io);
34185cf8d27SKonrad Sztyber 		}
34207fe6a43SSeth Howell 		break;
34307fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_UNMAP:
34407fe6a43SSeth Howell 		rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
34507fe6a43SSeth Howell 					    bdev_io->u.bdev.offset_blocks,
34607fe6a43SSeth Howell 					    bdev_io->u.bdev.num_blocks,
34707fe6a43SSeth Howell 					    _complete_internal_io, bdev_io);
34807fe6a43SSeth Howell 		break;
34907fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_FLUSH:
35007fe6a43SSeth Howell 		rc = spdk_bdev_flush_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
35107fe6a43SSeth Howell 					    bdev_io->u.bdev.offset_blocks,
35207fe6a43SSeth Howell 					    bdev_io->u.bdev.num_blocks,
35307fe6a43SSeth Howell 					    _complete_internal_io, bdev_io);
35407fe6a43SSeth Howell 		break;
35507fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_RESET:
35607fe6a43SSeth Howell 		rc = spdk_bdev_reset(crypto_bdev->base_desc, crypto_ch->base_ch,
35707fe6a43SSeth Howell 				     _complete_internal_io, bdev_io);
35807fe6a43SSeth Howell 		break;
35907fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
36007fe6a43SSeth Howell 	default:
36107fe6a43SSeth Howell 		SPDK_ERRLOG("crypto: unknown I/O type %d\n", bdev_io->type);
36212492cb9SKonrad Sztyber 		rc = -EINVAL;
36312492cb9SKonrad Sztyber 		break;
36407fe6a43SSeth Howell 	}
36507fe6a43SSeth Howell 
36607fe6a43SSeth Howell 	if (rc != 0) {
36707fe6a43SSeth Howell 		if (rc == -ENOMEM) {
3682172c432STomasz Zawadzki 			SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
3693824f6e3SKonrad Sztyber 			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
37007fe6a43SSeth Howell 		} else {
3714a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to submit bdev_io!\n");
37212492cb9SKonrad Sztyber 			crypto_io_fail(crypto_io);
37307fe6a43SSeth Howell 		}
37407fe6a43SSeth Howell 	}
37507fe6a43SSeth Howell }
37607fe6a43SSeth Howell 
37707fe6a43SSeth Howell /* We'll just call the base bdev and let it answer, except for the WRITE_ZEROES command, which
37807fe6a43SSeth Howell  * we always say we don't support so that the bdev layer will actually send us
37907fe6a43SSeth Howell  * real writes that we can encrypt.
38007fe6a43SSeth Howell  */
38107fe6a43SSeth Howell static bool
38207fe6a43SSeth Howell vbdev_crypto_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
38307fe6a43SSeth Howell {
38407fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;
38507fe6a43SSeth Howell 
38607fe6a43SSeth Howell 	switch (io_type) {
38707fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_WRITE:
38807fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_UNMAP:
38907fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_RESET:
39007fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_READ:
39107fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_FLUSH:
39207fe6a43SSeth Howell 		return spdk_bdev_io_type_supported(crypto_bdev->base_bdev, io_type);
39307fe6a43SSeth Howell 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
39407fe6a43SSeth Howell 	/* Force the bdev layer to issue actual writes of zeroes so we can
39507fe6a43SSeth Howell 	 * encrypt them as regular writes.
39607fe6a43SSeth Howell 	 */
39707fe6a43SSeth Howell 	default:
39807fe6a43SSeth Howell 		return false;
39907fe6a43SSeth Howell 	}
40007fe6a43SSeth Howell }
40107fe6a43SSeth Howell 
40207fe6a43SSeth Howell /* Callback for unregistering the IO device. */
40307fe6a43SSeth Howell static void
40407fe6a43SSeth Howell _device_unregister_cb(void *io_device)
40507fe6a43SSeth Howell {
40607fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = io_device;
40707fe6a43SSeth Howell 
40807fe6a43SSeth Howell 	/* Done with this crypto_bdev. */
4095ba9b78eSYuriy Umanets 	crypto_bdev->opts = NULL;
410e64728f0SKrzysztof Karas 
411e64728f0SKrzysztof Karas 	spdk_bdev_destruct_done(&crypto_bdev->crypto_bdev, 0);
41207fe6a43SSeth Howell 	free(crypto_bdev->crypto_bdev.name);
41307fe6a43SSeth Howell 	free(crypto_bdev);
41407fe6a43SSeth Howell }
41507fe6a43SSeth Howell 
416b3be320dSGangCao /* Wrapper for the bdev close operation. */
417b3be320dSGangCao static void
418b3be320dSGangCao _vbdev_crypto_destruct(void *ctx)
419b3be320dSGangCao {
420b3be320dSGangCao 	struct spdk_bdev_desc *desc = ctx;
421b3be320dSGangCao 
422b3be320dSGangCao 	spdk_bdev_close(desc);
423b3be320dSGangCao }
424b3be320dSGangCao 
42507fe6a43SSeth Howell /* Called after we've unregistered following a hot remove callback.
42607fe6a43SSeth Howell  * Our finish entry point will be called next.
42707fe6a43SSeth Howell  */
42807fe6a43SSeth Howell static int
42907fe6a43SSeth Howell vbdev_crypto_destruct(void *ctx)
43007fe6a43SSeth Howell {
43107fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;
43207fe6a43SSeth Howell 
43307fe6a43SSeth Howell 	/* Remove this device from the internal list */
43407fe6a43SSeth Howell 	TAILQ_REMOVE(&g_vbdev_crypto, crypto_bdev, link);
43507fe6a43SSeth Howell 
43607fe6a43SSeth Howell 	/* Unclaim the underlying bdev. */
43707fe6a43SSeth Howell 	spdk_bdev_module_release_bdev(crypto_bdev->base_bdev);
43807fe6a43SSeth Howell 
439b3be320dSGangCao 	/* Close the underlying bdev on its same opened thread. */
440b3be320dSGangCao 	if (crypto_bdev->thread && crypto_bdev->thread != spdk_get_thread()) {
441b3be320dSGangCao 		spdk_thread_send_msg(crypto_bdev->thread, _vbdev_crypto_destruct, crypto_bdev->base_desc);
442b3be320dSGangCao 	} else {
44307fe6a43SSeth Howell 		spdk_bdev_close(crypto_bdev->base_desc);
444b3be320dSGangCao 	}
44507fe6a43SSeth Howell 
44607fe6a43SSeth Howell 	/* Unregister the io_device. */
44707fe6a43SSeth Howell 	spdk_io_device_unregister(crypto_bdev, _device_unregister_cb);
44807fe6a43SSeth Howell 
449e64728f0SKrzysztof Karas 	return 1;
45007fe6a43SSeth Howell }
45107fe6a43SSeth Howell 
45207fe6a43SSeth Howell /* We supplied this as an entry point for upper layers that want to communicate with this
45307fe6a43SSeth Howell  * bdev.  This is how they get a channel. We are passed the same context we provided when
45407fe6a43SSeth Howell  * we created our crypto vbdev in examine() which, for this bdev, is the address of one of
45507fe6a43SSeth Howell  * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
45607fe6a43SSeth Howell  * struct and we'll keep it in our crypto node.
45707fe6a43SSeth Howell  */
45807fe6a43SSeth Howell static struct spdk_io_channel *
45907fe6a43SSeth Howell vbdev_crypto_get_io_channel(void *ctx)
46007fe6a43SSeth Howell {
46107fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;
46207fe6a43SSeth Howell 
46307fe6a43SSeth Howell 	/* The IO channel code will allocate a channel for us which consists of
46488e48baaSpaul luse 	 * the SPDK channel structure plus the size of our crypto_io_channel struct
46507fe6a43SSeth Howell 	 * that we passed in when we registered our IO device. It will then call
46607fe6a43SSeth Howell 	 * our channel create callback to populate any elements that we need to
46707fe6a43SSeth Howell 	 * update.
46807fe6a43SSeth Howell 	 */
46907fe6a43SSeth Howell 	return spdk_get_io_channel(crypto_bdev);
47007fe6a43SSeth Howell }
47107fe6a43SSeth Howell 
4722c49e910SMaciej Wawryk /* This is the output for bdev_get_bdevs() for this vbdev */
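/* For a hypothetical vbdev the object written here looks roughly like this (it appears
 * under "driver_specific" in the bdev_get_bdevs output):
 *   "crypto": { "base_bdev_name": "Malloc0", "name": "crypto0", "key_name": "key0" }
 */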
47307fe6a43SSeth Howell static int
47407fe6a43SSeth Howell vbdev_crypto_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
47507fe6a43SSeth Howell {
47607fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev = (struct vbdev_crypto *)ctx;
47707fe6a43SSeth Howell 
47807fe6a43SSeth Howell 	spdk_json_write_name(w, "crypto");
47907fe6a43SSeth Howell 	spdk_json_write_object_begin(w);
48007fe6a43SSeth Howell 	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
48107fe6a43SSeth Howell 	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
48213f97e67SAlexey Marchuk 	spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
48307fe6a43SSeth Howell 	spdk_json_write_object_end(w);
48413f97e67SAlexey Marchuk 
48513f97e67SAlexey Marchuk 	return 0;
48607fe6a43SSeth Howell }
48707fe6a43SSeth Howell 
48807fe6a43SSeth Howell static int
48907fe6a43SSeth Howell vbdev_crypto_config_json(struct spdk_json_write_ctx *w)
49007fe6a43SSeth Howell {
49107fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev;
49207fe6a43SSeth Howell 
49307fe6a43SSeth Howell 	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
49407fe6a43SSeth Howell 		spdk_json_write_object_begin(w);
49507fe6a43SSeth Howell 		spdk_json_write_named_string(w, "method", "bdev_crypto_create");
49607fe6a43SSeth Howell 		spdk_json_write_named_object_begin(w, "params");
49707fe6a43SSeth Howell 		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(crypto_bdev->base_bdev));
49807fe6a43SSeth Howell 		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&crypto_bdev->crypto_bdev));
49913f97e67SAlexey Marchuk 		spdk_json_write_named_string(w, "key_name", crypto_bdev->opts->key->param.key_name);
50007fe6a43SSeth Howell 		spdk_json_write_object_end(w);
50107fe6a43SSeth Howell 		spdk_json_write_object_end(w);
50207fe6a43SSeth Howell 	}
50307fe6a43SSeth Howell 	return 0;
50407fe6a43SSeth Howell }
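/* Each entry written above replays the original creation RPC, e.g. with hypothetical names:
 *   { "method": "bdev_crypto_create",
 *     "params": { "base_bdev_name": "Malloc0", "name": "crypto0", "key_name": "key0" } }
 */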
50507fe6a43SSeth Howell 
5061a6878dbSpaul luse /* We provide this callback for the SPDK channel code to create a channel using
5071a6878dbSpaul luse  * the channel struct we provided in our module get_io_channel() entry point. Here
5081a6878dbSpaul luse  * we get and save off an underlying base channel of the device below us so that
5091a6878dbSpaul luse  * we can communicate with the base bdev on a per-channel basis. We also get the accel
5101a6878dbSpaul luse  * channel and crypto key used to perform the crypto operations for this channel.
5111a6878dbSpaul luse  */
5121a6878dbSpaul luse static int
5131a6878dbSpaul luse crypto_bdev_ch_create_cb(void *io_device, void *ctx_buf)
5141a6878dbSpaul luse {
5151a6878dbSpaul luse 	struct crypto_io_channel *crypto_ch = ctx_buf;
5161a6878dbSpaul luse 	struct vbdev_crypto *crypto_bdev = io_device;
5171a6878dbSpaul luse 
5181a6878dbSpaul luse 	crypto_ch->base_ch = spdk_bdev_get_io_channel(crypto_bdev->base_desc);
519bc6764aaSKonrad Sztyber 	if (crypto_ch->base_ch == NULL) {
520bc6764aaSKonrad Sztyber 		SPDK_ERRLOG("Failed to get base bdev IO channel (bdev: %s)\n",
521bc6764aaSKonrad Sztyber 			    crypto_bdev->crypto_bdev.name);
522bc6764aaSKonrad Sztyber 		return -ENOMEM;
523bc6764aaSKonrad Sztyber 	}
524bc6764aaSKonrad Sztyber 
52513f97e67SAlexey Marchuk 	crypto_ch->accel_channel = spdk_accel_get_io_channel();
526bc6764aaSKonrad Sztyber 	if (crypto_ch->accel_channel == NULL) {
527bc6764aaSKonrad Sztyber 		SPDK_ERRLOG("Failed to get accel IO channel (bdev: %s)\n",
528bc6764aaSKonrad Sztyber 			    crypto_bdev->crypto_bdev.name);
529bc6764aaSKonrad Sztyber 		spdk_put_io_channel(crypto_ch->base_ch);
530bc6764aaSKonrad Sztyber 		return -ENOMEM;
531bc6764aaSKonrad Sztyber 	}
532bc6764aaSKonrad Sztyber 
53313f97e67SAlexey Marchuk 	crypto_ch->crypto_key = crypto_bdev->opts->key;
53407fe6a43SSeth Howell 
53507fe6a43SSeth Howell 	return 0;
53607fe6a43SSeth Howell }
53707fe6a43SSeth Howell 
53807fe6a43SSeth Howell /* We provide this callback for the SPDK channel code to destroy a channel
53907fe6a43SSeth Howell  * created with our create callback. We just need to undo anything we did
54007fe6a43SSeth Howell  * when we created it.
54107fe6a43SSeth Howell  */
54207fe6a43SSeth Howell static void
54307fe6a43SSeth Howell crypto_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
54407fe6a43SSeth Howell {
54507fe6a43SSeth Howell 	struct crypto_io_channel *crypto_ch = ctx_buf;
54607fe6a43SSeth Howell 
54707fe6a43SSeth Howell 	spdk_put_io_channel(crypto_ch->base_ch);
54813f97e67SAlexey Marchuk 	spdk_put_io_channel(crypto_ch->accel_channel);
54907fe6a43SSeth Howell }
55007fe6a43SSeth Howell 
55107fe6a43SSeth Howell /* Create the association between the bdev and vbdev name and insert it
55207fe6a43SSeth Howell  * on the global list. */
55307fe6a43SSeth Howell static int
5545ba9b78eSYuriy Umanets vbdev_crypto_insert_name(struct vbdev_crypto_opts *opts, struct bdev_names **out)
55507fe6a43SSeth Howell {
55607fe6a43SSeth Howell 	struct bdev_names *name;
5575ba9b78eSYuriy Umanets 
5585ba9b78eSYuriy Umanets 	assert(opts);
5595ba9b78eSYuriy Umanets 	assert(out);
56007fe6a43SSeth Howell 
56107fe6a43SSeth Howell 	TAILQ_FOREACH(name, &g_bdev_names, link) {
5625ba9b78eSYuriy Umanets 		if (strcmp(opts->vbdev_name, name->opts->vbdev_name) == 0) {
5635ba9b78eSYuriy Umanets 			SPDK_ERRLOG("Crypto bdev %s already exists\n", opts->vbdev_name);
56407fe6a43SSeth Howell 			return -EEXIST;
56507fe6a43SSeth Howell 		}
56607fe6a43SSeth Howell 	}
56707fe6a43SSeth Howell 
5685ba9b78eSYuriy Umanets 	name = calloc(1, sizeof(struct bdev_names));
5695ba9b78eSYuriy Umanets 	if (!name) {
5705ba9b78eSYuriy Umanets 		SPDK_ERRLOG("Failed to allocate memory for bdev_names.\n");
5715ba9b78eSYuriy Umanets 		return -ENOMEM;
572f241068fSpaul luse 	}
573f241068fSpaul luse 
5745ba9b78eSYuriy Umanets 	name->opts = opts;
57507fe6a43SSeth Howell 	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);
5765ba9b78eSYuriy Umanets 	*out = name;
57707fe6a43SSeth Howell 
57807fe6a43SSeth Howell 	return 0;
5790d857f44SYuriy Umanets }
5805ba9b78eSYuriy Umanets 
5815ba9b78eSYuriy Umanets void
5825ba9b78eSYuriy Umanets free_crypto_opts(struct vbdev_crypto_opts *opts)
5835ba9b78eSYuriy Umanets {
5845ba9b78eSYuriy Umanets 	free(opts->bdev_name);
5855ba9b78eSYuriy Umanets 	free(opts->vbdev_name);
5865ba9b78eSYuriy Umanets 	free(opts);
5875ba9b78eSYuriy Umanets }
5885ba9b78eSYuriy Umanets 
5895ba9b78eSYuriy Umanets static void
5905ba9b78eSYuriy Umanets vbdev_crypto_delete_name(struct bdev_names *name)
5915ba9b78eSYuriy Umanets {
5925ba9b78eSYuriy Umanets 	TAILQ_REMOVE(&g_bdev_names, name, link);
5935ba9b78eSYuriy Umanets 	if (name->opts) {
59413f97e67SAlexey Marchuk 		if (name->opts->key_owner && name->opts->key) {
59513f97e67SAlexey Marchuk 			spdk_accel_crypto_key_destroy(name->opts->key);
59613f97e67SAlexey Marchuk 		}
5975ba9b78eSYuriy Umanets 		free_crypto_opts(name->opts);
5985ba9b78eSYuriy Umanets 		name->opts = NULL;
5995ba9b78eSYuriy Umanets 	}
60007fe6a43SSeth Howell 	free(name);
60107fe6a43SSeth Howell }
60207fe6a43SSeth Howell 
60307fe6a43SSeth Howell /* RPC entry point for crypto creation. */
60407fe6a43SSeth Howell int
6055ba9b78eSYuriy Umanets create_crypto_disk(struct vbdev_crypto_opts *opts)
60607fe6a43SSeth Howell {
6075ba9b78eSYuriy Umanets 	struct bdev_names *name = NULL;
608491e6c43SShuhei Matsumoto 	int rc;
60907fe6a43SSeth Howell 
6105ba9b78eSYuriy Umanets 	rc = vbdev_crypto_insert_name(opts, &name);
61107fe6a43SSeth Howell 	if (rc) {
61207fe6a43SSeth Howell 		return rc;
61307fe6a43SSeth Howell 	}
61407fe6a43SSeth Howell 
6155ba9b78eSYuriy Umanets 	rc = vbdev_crypto_claim(opts->bdev_name);
616491e6c43SShuhei Matsumoto 	if (rc == -ENODEV) {
61707fe6a43SSeth Howell 		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
618491e6c43SShuhei Matsumoto 		rc = 0;
61907fe6a43SSeth Howell 	}
62007fe6a43SSeth Howell 
6215ba9b78eSYuriy Umanets 	if (rc) {
6225ba9b78eSYuriy Umanets 		assert(name != NULL);
6235ba9b78eSYuriy Umanets 		/* In case of error we let the caller deallocate @opts,
62413f97e67SAlexey Marchuk 		 * since that is its responsibility. Setting name->opts = NULL lets
6255ba9b78eSYuriy Umanets 		 * vbdev_crypto_delete_name() know it does not have to do anything
6265ba9b78eSYuriy Umanets 		 * about @opts.
6275ba9b78eSYuriy Umanets 		 */
6285ba9b78eSYuriy Umanets 		name->opts = NULL;
6295ba9b78eSYuriy Umanets 		vbdev_crypto_delete_name(name);
6305ba9b78eSYuriy Umanets 	}
63107fe6a43SSeth Howell 	return rc;
63207fe6a43SSeth Howell }
63307fe6a43SSeth Howell 
63488e48baaSpaul luse /* Called at module init time. There is nothing to set up here; crypto vbdevs are
63507fe6a43SSeth Howell  * configured via RPC and created in vbdev_crypto_claim().
63607fe6a43SSeth Howell  */
63707fe6a43SSeth Howell static int
63807fe6a43SSeth Howell vbdev_crypto_init(void)
63907fe6a43SSeth Howell {
64013f97e67SAlexey Marchuk 	return 0;
64107fe6a43SSeth Howell }
64207fe6a43SSeth Howell 
64307fe6a43SSeth Howell /* Called when the entire module is being torn down. */
64407fe6a43SSeth Howell static void
64507fe6a43SSeth Howell vbdev_crypto_finish(void)
64607fe6a43SSeth Howell {
64707fe6a43SSeth Howell 	struct bdev_names *name;
64807fe6a43SSeth Howell 
64907fe6a43SSeth Howell 	while ((name = TAILQ_FIRST(&g_bdev_names))) {
6505ba9b78eSYuriy Umanets 		vbdev_crypto_delete_name(name);
65107fe6a43SSeth Howell 	}
65207fe6a43SSeth Howell }
65307fe6a43SSeth Howell 
65407fe6a43SSeth Howell /* During init we'll be asked how much memory we'd like passed to us
65507fe6a43SSeth Howell  * in bdev_io structures as context. Here's where we specify how
65607fe6a43SSeth Howell  * much context we want per IO.
65707fe6a43SSeth Howell  */
65807fe6a43SSeth Howell static int
65907fe6a43SSeth Howell vbdev_crypto_get_ctx_size(void)
66007fe6a43SSeth Howell {
66107fe6a43SSeth Howell 	return sizeof(struct crypto_bdev_io);
66207fe6a43SSeth Howell }
66307fe6a43SSeth Howell 
66407fe6a43SSeth Howell static void
665491e6c43SShuhei Matsumoto vbdev_crypto_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
66607fe6a43SSeth Howell {
66707fe6a43SSeth Howell 	struct vbdev_crypto *crypto_bdev, *tmp;
66807fe6a43SSeth Howell 
66907fe6a43SSeth Howell 	TAILQ_FOREACH_SAFE(crypto_bdev, &g_vbdev_crypto, link, tmp) {
67007fe6a43SSeth Howell 		if (bdev_find == crypto_bdev->base_bdev) {
67107fe6a43SSeth Howell 			spdk_bdev_unregister(&crypto_bdev->crypto_bdev, NULL, NULL);
67207fe6a43SSeth Howell 		}
67307fe6a43SSeth Howell 	}
67407fe6a43SSeth Howell }
67507fe6a43SSeth Howell 
676*ed43989aSVasilii Ivanov static void
677*ed43989aSVasilii Ivanov vbdev_crypto_base_bdev_resize_cb(struct spdk_bdev *bdev_find)
678*ed43989aSVasilii Ivanov {
679*ed43989aSVasilii Ivanov 	struct vbdev_crypto *crypto_bdev;
680*ed43989aSVasilii Ivanov 
681*ed43989aSVasilii Ivanov 	TAILQ_FOREACH(crypto_bdev, &g_vbdev_crypto, link) {
682*ed43989aSVasilii Ivanov 		if (bdev_find == crypto_bdev->base_bdev) {
683*ed43989aSVasilii Ivanov 			spdk_bdev_notify_blockcnt_change(&crypto_bdev->crypto_bdev, bdev_find->blockcnt);
684*ed43989aSVasilii Ivanov 		}
685*ed43989aSVasilii Ivanov 	}
686*ed43989aSVasilii Ivanov }
687*ed43989aSVasilii Ivanov 
688491e6c43SShuhei Matsumoto /* Called when the underlying base bdev triggers an asynchronous event such as removal or resize. */
689491e6c43SShuhei Matsumoto static void
690491e6c43SShuhei Matsumoto vbdev_crypto_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
691491e6c43SShuhei Matsumoto 				void *event_ctx)
692491e6c43SShuhei Matsumoto {
693491e6c43SShuhei Matsumoto 	switch (type) {
694491e6c43SShuhei Matsumoto 	case SPDK_BDEV_EVENT_REMOVE:
695491e6c43SShuhei Matsumoto 		vbdev_crypto_base_bdev_hotremove_cb(bdev);
696491e6c43SShuhei Matsumoto 		break;
697*ed43989aSVasilii Ivanov 	case SPDK_BDEV_EVENT_RESIZE:
698*ed43989aSVasilii Ivanov 		vbdev_crypto_base_bdev_resize_cb(bdev);
699*ed43989aSVasilii Ivanov 		break;
700491e6c43SShuhei Matsumoto 	default:
701491e6c43SShuhei Matsumoto 		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
702491e6c43SShuhei Matsumoto 		break;
703491e6c43SShuhei Matsumoto 	}
704491e6c43SShuhei Matsumoto }
705491e6c43SShuhei Matsumoto 
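/* Report the memory domains this vbdev may hand to upper layers: the generic accel domain
 * (which covers the aux buffers obtained from spdk_accel_get_buf()) plus whatever the
 * module implementing SPDK_ACCEL_OPC_ENCRYPT reports. Per the usual get_memory_domains
 * contract, the total count is returned even if it exceeds array_size; only array_size
 * entries are actually filled in.
 */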
70643cf39beSKonrad Sztyber static int
70743cf39beSKonrad Sztyber vbdev_crypto_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
70843cf39beSKonrad Sztyber {
709b30c1383SAlexey Marchuk 	struct spdk_memory_domain **accel_domains = NULL;
710b30c1383SAlexey Marchuk 	int num_domains = 0, accel_rc, accel_array_size = 0;
71143cf39beSKonrad Sztyber 
712b30c1383SAlexey Marchuk 	/* Report generic accel and encryption module's memory domains */
713b30c1383SAlexey Marchuk 	if (domains && num_domains < array_size) {
71443cf39beSKonrad Sztyber 		domains[num_domains] = spdk_accel_get_memory_domain();
71543cf39beSKonrad Sztyber 	}
71643cf39beSKonrad Sztyber 
717b30c1383SAlexey Marchuk 	num_domains++;
718b30c1383SAlexey Marchuk 	if (domains && num_domains < array_size) {
719b30c1383SAlexey Marchuk 		accel_domains = domains + num_domains;
720b30c1383SAlexey Marchuk 		accel_array_size = array_size - num_domains;
721b30c1383SAlexey Marchuk 	}
722b30c1383SAlexey Marchuk 	accel_rc = spdk_accel_get_opc_memory_domains(SPDK_ACCEL_OPC_ENCRYPT, accel_domains,
723b30c1383SAlexey Marchuk 			accel_array_size);
724b30c1383SAlexey Marchuk 	if (accel_rc > 0) {
725b30c1383SAlexey Marchuk 		num_domains += accel_rc;
726b30c1383SAlexey Marchuk 	}
727b30c1383SAlexey Marchuk 
728b30c1383SAlexey Marchuk 	return num_domains;
72943cf39beSKonrad Sztyber }
73043cf39beSKonrad Sztyber 
7316a86385dSKonrad Sztyber static bool
7326a86385dSKonrad Sztyber vbdev_crypto_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
7336a86385dSKonrad Sztyber {
7346a86385dSKonrad Sztyber 	switch (type) {
7356a86385dSKonrad Sztyber 	case SPDK_BDEV_IO_TYPE_READ:
7366a86385dSKonrad Sztyber 	case SPDK_BDEV_IO_TYPE_WRITE:
7376a86385dSKonrad Sztyber 		return true;
7386a86385dSKonrad Sztyber 	default:
7396a86385dSKonrad Sztyber 		return false;
7406a86385dSKonrad Sztyber 	}
7416a86385dSKonrad Sztyber }
7426a86385dSKonrad Sztyber 
74307fe6a43SSeth Howell /* When we register our bdev this is how we specify our entry points. */
74407fe6a43SSeth Howell static const struct spdk_bdev_fn_table vbdev_crypto_fn_table = {
74507fe6a43SSeth Howell 	.destruct			= vbdev_crypto_destruct,
74607fe6a43SSeth Howell 	.submit_request			= vbdev_crypto_submit_request,
74707fe6a43SSeth Howell 	.io_type_supported		= vbdev_crypto_io_type_supported,
74807fe6a43SSeth Howell 	.get_io_channel			= vbdev_crypto_get_io_channel,
74907fe6a43SSeth Howell 	.dump_info_json			= vbdev_crypto_dump_info_json,
75043cf39beSKonrad Sztyber 	.get_memory_domains		= vbdev_crypto_get_memory_domains,
7516a86385dSKonrad Sztyber 	.accel_sequence_supported	= vbdev_crypto_sequence_supported,
75207fe6a43SSeth Howell };
75307fe6a43SSeth Howell 
75407fe6a43SSeth Howell static struct spdk_bdev_module crypto_if = {
75507fe6a43SSeth Howell 	.name = "crypto",
75607fe6a43SSeth Howell 	.module_init = vbdev_crypto_init,
75707fe6a43SSeth Howell 	.get_ctx_size = vbdev_crypto_get_ctx_size,
75807fe6a43SSeth Howell 	.examine_config = vbdev_crypto_examine,
75907fe6a43SSeth Howell 	.module_fini = vbdev_crypto_finish,
76007fe6a43SSeth Howell 	.config_json = vbdev_crypto_config_json
76107fe6a43SSeth Howell };
76207fe6a43SSeth Howell 
76307fe6a43SSeth Howell SPDK_BDEV_MODULE_REGISTER(crypto, &crypto_if)
76407fe6a43SSeth Howell 
76507fe6a43SSeth Howell static int
766491e6c43SShuhei Matsumoto vbdev_crypto_claim(const char *bdev_name)
76707fe6a43SSeth Howell {
76807fe6a43SSeth Howell 	struct bdev_names *name;
76907fe6a43SSeth Howell 	struct vbdev_crypto *vbdev;
770491e6c43SShuhei Matsumoto 	struct spdk_bdev *bdev;
771cc8347dcSAlexey Marchuk 	struct spdk_iobuf_opts iobuf_opts;
772d1ae714bSKonrad Sztyber 	struct spdk_accel_operation_exec_ctx opctx = {};
7732c09c37cSKrzysztof Karas 	struct spdk_uuid ns_uuid;
77407fe6a43SSeth Howell 	int rc = 0;
77507fe6a43SSeth Howell 
7762c09c37cSKrzysztof Karas 	spdk_uuid_parse(&ns_uuid, BDEV_CRYPTO_NAMESPACE_UUID);
7772c09c37cSKrzysztof Karas 
778cc8347dcSAlexey Marchuk 		/* Limit the max IO size to a reasonable value. Since the write path uses an aux buffer,
779cc8347dcSAlexey Marchuk 		 * set the limit to the large_bufsize value. */
780194983eeSJohn Levon 	spdk_iobuf_get_opts(&iobuf_opts, sizeof(iobuf_opts));
781cc8347dcSAlexey Marchuk 
78207fe6a43SSeth Howell 	/* Check our list of names from config versus this bdev and if
78307fe6a43SSeth Howell 	 * there's a match, create the crypto_bdev & bdev accordingly.
78407fe6a43SSeth Howell 	 */
78507fe6a43SSeth Howell 	TAILQ_FOREACH(name, &g_bdev_names, link) {
7865ba9b78eSYuriy Umanets 		if (strcmp(name->opts->bdev_name, bdev_name) != 0) {
78707fe6a43SSeth Howell 			continue;
78807fe6a43SSeth Howell 		}
789491e6c43SShuhei Matsumoto 		SPDK_DEBUGLOG(vbdev_crypto, "Match on %s\n", bdev_name);
79007fe6a43SSeth Howell 
79107fe6a43SSeth Howell 		vbdev = calloc(1, sizeof(struct vbdev_crypto));
79207fe6a43SSeth Howell 		if (!vbdev) {
7934a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev.\n");
79413f97e67SAlexey Marchuk 			return -ENOMEM;
79507fe6a43SSeth Howell 		}
7965ba9b78eSYuriy Umanets 		vbdev->crypto_bdev.product_name = "crypto";
79707fe6a43SSeth Howell 
7985ba9b78eSYuriy Umanets 		vbdev->crypto_bdev.name = strdup(name->opts->vbdev_name);
79907fe6a43SSeth Howell 		if (!vbdev->crypto_bdev.name) {
8004a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to allocate memory for crypto_bdev name.\n");
80107fe6a43SSeth Howell 			rc = -ENOMEM;
80207fe6a43SSeth Howell 			goto error_bdev_name;
80307fe6a43SSeth Howell 		}
80407fe6a43SSeth Howell 
805491e6c43SShuhei Matsumoto 		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_crypto_base_bdev_event_cb,
806491e6c43SShuhei Matsumoto 					NULL, &vbdev->base_desc);
807491e6c43SShuhei Matsumoto 		if (rc) {
808491e6c43SShuhei Matsumoto 			if (rc != -ENODEV) {
8094a11579eSYuriy Umanets 				SPDK_ERRLOG("Failed to open bdev %s: error %d\n", bdev_name, rc);
810491e6c43SShuhei Matsumoto 			}
811491e6c43SShuhei Matsumoto 			goto error_open;
812491e6c43SShuhei Matsumoto 		}
813491e6c43SShuhei Matsumoto 
814491e6c43SShuhei Matsumoto 		bdev = spdk_bdev_desc_get_bdev(vbdev->base_desc);
815491e6c43SShuhei Matsumoto 		vbdev->base_bdev = bdev;
816491e6c43SShuhei Matsumoto 
81707fe6a43SSeth Howell 		vbdev->crypto_bdev.write_cache = bdev->write_cache;
818fc5be175SKonrad Sztyber 		vbdev->crypto_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
819fc5be175SKonrad Sztyber 		vbdev->crypto_bdev.max_rw_size = spdk_min(
820fc5be175SKonrad Sztyber 				bdev->max_rw_size ? bdev->max_rw_size : UINT32_MAX,
821fc5be175SKonrad Sztyber 				iobuf_opts.large_bufsize / bdev->blocklen);
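		/* Hypothetical example: with 128 KiB large iobuf buffers and a 4 KiB block size this
		 * caps a single IO at 32 blocks, since the whole encrypted write must fit into one
		 * aux buffer.
		 */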
822d1ae714bSKonrad Sztyber 
823d1ae714bSKonrad Sztyber 		opctx.size = SPDK_SIZEOF(&opctx, block_size);
824d1ae714bSKonrad Sztyber 		opctx.block_size = bdev->blocklen;
825d1ae714bSKonrad Sztyber 		vbdev->crypto_bdev.required_alignment =
826d1ae714bSKonrad Sztyber 			spdk_max(bdev->required_alignment,
827d1ae714bSKonrad Sztyber 				 spdk_max(spdk_accel_get_buf_align(SPDK_ACCEL_OPC_ENCRYPT, &opctx),
828d1ae714bSKonrad Sztyber 					  spdk_accel_get_buf_align(SPDK_ACCEL_OPC_DECRYPT, &opctx)));
829d1ae714bSKonrad Sztyber 
83007fe6a43SSeth Howell 		vbdev->crypto_bdev.blocklen = bdev->blocklen;
83107fe6a43SSeth Howell 		vbdev->crypto_bdev.blockcnt = bdev->blockcnt;
83207fe6a43SSeth Howell 
83307fe6a43SSeth Howell 		/* This is the context that is passed to us when the bdev
83407fe6a43SSeth Howell 		 * layer calls in so we'll save our crypto_bdev node here.
83507fe6a43SSeth Howell 		 */
83607fe6a43SSeth Howell 		vbdev->crypto_bdev.ctxt = vbdev;
83707fe6a43SSeth Howell 		vbdev->crypto_bdev.fn_table = &vbdev_crypto_fn_table;
83807fe6a43SSeth Howell 		vbdev->crypto_bdev.module = &crypto_if;
8395ba9b78eSYuriy Umanets 
8405ba9b78eSYuriy Umanets 		/* Assign crypto opts from the name. The pointer is valid up to the point
8415ba9b78eSYuriy Umanets 		 * the module is unloaded and all names removed from the list. */
8425ba9b78eSYuriy Umanets 		vbdev->opts = name->opts;
8435ba9b78eSYuriy Umanets 
8442c09c37cSKrzysztof Karas 		/* Generate UUID based on namespace UUID + base bdev UUID */
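		/* Because the namespace UUID and hash are fixed, this is deterministic: re-creating
		 * the vbdev on top of a base bdev with the same UUID yields the same crypto bdev UUID.
		 */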
8452c09c37cSKrzysztof Karas 		rc = spdk_uuid_generate_sha1(&vbdev->crypto_bdev.uuid, &ns_uuid,
8462c09c37cSKrzysztof Karas 					     (const char *)&vbdev->base_bdev->uuid, sizeof(struct spdk_uuid));
8472c09c37cSKrzysztof Karas 		if (rc) {
8482c09c37cSKrzysztof Karas 			SPDK_ERRLOG("Unable to generate new UUID for crypto bdev\n");
8492c09c37cSKrzysztof Karas 			goto error_uuid;
8502c09c37cSKrzysztof Karas 		}
8512c09c37cSKrzysztof Karas 
85207fe6a43SSeth Howell 		TAILQ_INSERT_TAIL(&g_vbdev_crypto, vbdev, link);
85307fe6a43SSeth Howell 
85407fe6a43SSeth Howell 		spdk_io_device_register(vbdev, crypto_bdev_ch_create_cb, crypto_bdev_ch_destroy_cb,
85507fe6a43SSeth Howell 					sizeof(struct crypto_io_channel), vbdev->crypto_bdev.name);
85607fe6a43SSeth Howell 
857b3be320dSGangCao 		/* Save the thread where the base device is opened */
858b3be320dSGangCao 		vbdev->thread = spdk_get_thread();
859b3be320dSGangCao 
86007fe6a43SSeth Howell 		rc = spdk_bdev_module_claim_bdev(bdev, vbdev->base_desc, vbdev->crypto_bdev.module);
86107fe6a43SSeth Howell 		if (rc) {
8624a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to claim bdev %s\n", spdk_bdev_get_name(bdev));
86307fe6a43SSeth Howell 			goto error_claim;
86407fe6a43SSeth Howell 		}
86507fe6a43SSeth Howell 
86607fe6a43SSeth Howell 		rc = spdk_bdev_register(&vbdev->crypto_bdev);
86707fe6a43SSeth Howell 		if (rc < 0) {
8684a11579eSYuriy Umanets 			SPDK_ERRLOG("Failed to register vbdev: error %d\n", rc);
86907fe6a43SSeth Howell 			rc = -EINVAL;
87007fe6a43SSeth Howell 			goto error_bdev_register;
87107fe6a43SSeth Howell 		}
8725ba9b78eSYuriy Umanets 		SPDK_DEBUGLOG(vbdev_crypto, "Registered io_device and virtual bdev for: %s\n",
8735ba9b78eSYuriy Umanets 			      vbdev->opts->vbdev_name);
87407fe6a43SSeth Howell 		break;
87507fe6a43SSeth Howell 	}
87607fe6a43SSeth Howell 
87707fe6a43SSeth Howell 	return rc;
87807fe6a43SSeth Howell 
87907fe6a43SSeth Howell 	/* Error cleanup paths. */
88007fe6a43SSeth Howell error_bdev_register:
8813d0bae35SYuriy Umanets 	spdk_bdev_module_release_bdev(vbdev->base_bdev);
88207fe6a43SSeth Howell error_claim:
88307fe6a43SSeth Howell 	TAILQ_REMOVE(&g_vbdev_crypto, vbdev, link);
88407fe6a43SSeth Howell 	spdk_io_device_unregister(vbdev, NULL);
8852c09c37cSKrzysztof Karas error_uuid:
88645f24aebSYuriy Umanets 	spdk_bdev_close(vbdev->base_desc);
88745f24aebSYuriy Umanets error_open:
88807fe6a43SSeth Howell 	free(vbdev->crypto_bdev.name);
88907fe6a43SSeth Howell error_bdev_name:
89007fe6a43SSeth Howell 	free(vbdev);
89113f97e67SAlexey Marchuk 
89207fe6a43SSeth Howell 	return rc;
89307fe6a43SSeth Howell }
89407fe6a43SSeth Howell 
8953878371aSKrzysztof Karas struct crypto_delete_disk_ctx {
8963878371aSKrzysztof Karas 	spdk_delete_crypto_complete cb_fn;
8973878371aSKrzysztof Karas 	void *cb_arg;
8983878371aSKrzysztof Karas 	char *bdev_name;
8993878371aSKrzysztof Karas };
9003878371aSKrzysztof Karas 
9013878371aSKrzysztof Karas static void
9023878371aSKrzysztof Karas delete_crypto_disk_bdev_name(void *ctx, int rc)
9033878371aSKrzysztof Karas {
9043878371aSKrzysztof Karas 	struct bdev_names *name;
9053878371aSKrzysztof Karas 	struct crypto_delete_disk_ctx *disk_ctx = ctx;
9063878371aSKrzysztof Karas 
9073878371aSKrzysztof Karas 	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
9083878371aSKrzysztof Karas 	 * vbdev does not get re-created if the same bdev is constructed at some other time,
9093878371aSKrzysztof Karas 	 * unless the underlying bdev was hot-removed. */
9103878371aSKrzysztof Karas 	TAILQ_FOREACH(name, &g_bdev_names, link) {
9113878371aSKrzysztof Karas 		if (strcmp(name->opts->vbdev_name, disk_ctx->bdev_name) == 0) {
9123878371aSKrzysztof Karas 			vbdev_crypto_delete_name(name);
9133878371aSKrzysztof Karas 			break;
9143878371aSKrzysztof Karas 		}
9153878371aSKrzysztof Karas 	}
9163878371aSKrzysztof Karas 
9173878371aSKrzysztof Karas 	disk_ctx->cb_fn(disk_ctx->cb_arg, rc);
9183878371aSKrzysztof Karas 
9193878371aSKrzysztof Karas 	free(disk_ctx->bdev_name);
9203878371aSKrzysztof Karas 	free(disk_ctx);
9213878371aSKrzysztof Karas }
9223878371aSKrzysztof Karas 
92307fe6a43SSeth Howell /* RPC entry for deleting a crypto vbdev. */
92407fe6a43SSeth Howell void
9254573e4ccSShuhei Matsumoto delete_crypto_disk(const char *bdev_name, spdk_delete_crypto_complete cb_fn,
92607fe6a43SSeth Howell 		   void *cb_arg)
92707fe6a43SSeth Howell {
9284573e4ccSShuhei Matsumoto 	int rc;
9293878371aSKrzysztof Karas 	struct crypto_delete_disk_ctx *ctx;
93007fe6a43SSeth Howell 
9313878371aSKrzysztof Karas 	ctx = calloc(1, sizeof(struct crypto_delete_disk_ctx));
9323878371aSKrzysztof Karas 	if (!ctx) {
9333878371aSKrzysztof Karas 		SPDK_ERRLOG("Failed to allocate delete crypto disk ctx\n");
9343878371aSKrzysztof Karas 		cb_fn(cb_arg, -ENOMEM);
9353878371aSKrzysztof Karas 		return;
9363878371aSKrzysztof Karas 	}
9373878371aSKrzysztof Karas 
9383878371aSKrzysztof Karas 	ctx->bdev_name = strdup(bdev_name);
9393878371aSKrzysztof Karas 	if (!ctx->bdev_name) {
9403878371aSKrzysztof Karas 		SPDK_ERRLOG("Failed to copy bdev_name\n");
9413878371aSKrzysztof Karas 		free(ctx);
9423878371aSKrzysztof Karas 		cb_fn(cb_arg, -ENOMEM);
9433878371aSKrzysztof Karas 		return;
9443878371aSKrzysztof Karas 	}
9453878371aSKrzysztof Karas 	ctx->cb_arg = cb_arg;
9463878371aSKrzysztof Karas 	ctx->cb_fn = cb_fn;
9474573e4ccSShuhei Matsumoto 	/* Some cleanup happens in the destruct callback. */
9483878371aSKrzysztof Karas 	rc = spdk_bdev_unregister_by_name(bdev_name, &crypto_if, delete_crypto_disk_bdev_name, ctx);
9493878371aSKrzysztof Karas 	if (rc != 0) {
9503878371aSKrzysztof Karas 		SPDK_ERRLOG("Encountered an error during bdev unregistration\n");
9514573e4ccSShuhei Matsumoto 		cb_fn(cb_arg, rc);
9523878371aSKrzysztof Karas 		free(ctx->bdev_name);
9533878371aSKrzysztof Karas 		free(ctx);
9544573e4ccSShuhei Matsumoto 	}
95507fe6a43SSeth Howell }
95607fe6a43SSeth Howell 
95707fe6a43SSeth Howell /* Because we specified this function as examine_config when we registered our
95807fe6a43SSeth Howell  * module, we'll get this call anytime a new bdev shows up.
95907fe6a43SSeth Howell  * Here we need to decide if we care about it and if so what to do. We
96007fe6a43SSeth Howell  * check the new bdev against the list of names built up via RPC and, if the
96107fe6a43SSeth Howell  * user configured us to attach to this
96207fe6a43SSeth Howell  * bdev, here's where we do it.
96307fe6a43SSeth Howell  */
96407fe6a43SSeth Howell static void
96507fe6a43SSeth Howell vbdev_crypto_examine(struct spdk_bdev *bdev)
96607fe6a43SSeth Howell {
967491e6c43SShuhei Matsumoto 	vbdev_crypto_claim(spdk_bdev_get_name(bdev));
96807fe6a43SSeth Howell 	spdk_bdev_module_examine_done(&crypto_if);
96907fe6a43SSeth Howell }
97007fe6a43SSeth Howell 
9712172c432STomasz Zawadzki SPDK_LOG_REGISTER_COMPONENT(vbdev_crypto)
972