/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"

/* This namespace UUID was generated using the uuid_generate() method. */
#define BDEV_PASSTHRU_NAMESPACE_UUID "7e25812e-c8c0-4d3f-8599-16d790555b85"
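
/*
 * Note: when no UUID is supplied by the caller, vbdev_passthru_register() below combines
 * this namespace UUID with the base bdev's UUID via spdk_uuid_generate_sha1(), so the
 * passthru bdev gets a UUID deterministically derived from its base bdev.
 */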

static int vbdev_passthru_init(void);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru",
	.module_init = vbdev_passthru_init,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if)
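
/*
 * Quick orientation (summarizing the comments further down): module_init runs once at
 * startup, examine_config runs for each bdev registered with the bdev layer so we can
 * auto-attach to bases named via RPC, and module_fini tears down the name list on shutdown.
 */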

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char			*vbdev_name;
	char			*bdev_name;
	struct spdk_uuid	uuid;
	TAILQ_ENTRY(bdev_names)	link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev		*base_bdev; /* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc; /* its descriptor we get from open */
	struct spdk_bdev		pt_bdev;    /* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru)	link;
	struct spdk_thread		*thread;    /* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel; it could
 * simply pass back the channel of the bdev underneath it, but for example purposes we
 * present our own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel	*base_ch; /* IO channel of base device */
};

/* Just for fun: this pt_bdev module doesn't need it, but this is essentially a per-IO
 * context that we get handed by the bdev layer.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};
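
/*
 * Because vbdev_passthru_get_ctx_size() (below) returns sizeof(struct passthru_bdev_io),
 * the bdev layer reserves that much driver_ctx space in every bdev_io submitted to this
 * vbdev; the submit and completion paths cast bdev_io->driver_ctx to this struct.
 */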

static void vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node  = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on the same thread that it was opened on. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
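
/*
 * Retry flow used by the submit paths below: on -ENOMEM from the base bdev, the upper
 * layer's channel is stashed in io_ctx->ch and vbdev_passthru_queue_io() parks the IO
 * with spdk_bdev_queue_io_wait(); vbdev_passthru_resubmit_io() then replays the original
 * request once the base bdev has resources again.
 */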

static void
pt_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
	opts->dif_check_flags_exclude_mask = ~bdev_io->u.bdev.dif_check_flags;
}
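
/*
 * Illustrative example (hypothetical values): if the incoming bdev_io only has the guard
 * and reftag check bits set in dif_check_flags, the complement above excludes every other
 * DIF check when the request is forwarded, so the base bdev performs exactly the checks
 * the original IO asked for.
 */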

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example, but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pt_init_ext_io_opts(bdev_io, &io_opts);
	rc = spdk_bdev_readv_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					bdev_io->u.bdev.num_blocks, _pt_complete_io,
					bdev_io, &io_opts);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which, in turn, allocate another bdev IO and call our completion
 * callback (provided below) along with the original bdev_io so that we can complete it
 * once this IO completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc = 0;

	/* Set up a per-IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		pt_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						 bdev_io->u.bdev.num_blocks, _pt_complete_io,
						 bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, NULL, 0,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		rc = spdk_bdev_copy_blocks(pt_node->base_desc, pt_ch->base_ch,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.copy.src_offset_blocks,
					   bdev_io->u.bdev.num_blocks,
					   _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
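
/*
 * For context, a minimal (hypothetical) upper-layer sequence that reaches
 * vbdev_passthru_submit_request() as SPDK_BDEV_IO_TYPE_READ looks roughly like:
 *
 *	struct spdk_bdev_desc *desc;
 *	spdk_bdev_open_ext("TestPT", false, app_event_cb, NULL, &desc);
 *	struct spdk_io_channel *ch = spdk_bdev_get_io_channel(desc);
 *	spdk_bdev_read_blocks(desc, ch, buf, 0, 8, read_done_cb, NULL);
 *
 * "TestPT", app_event_cb, buf and read_done_cb are placeholders, not part of this module.
 */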

/* We'll just call the base bdev and let it answer; however, if we were more (or less)
 * restrictive for some reason, we could get the response back and modify it according
 * to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}

/* We supplied this as an entry point for upper layers that want to communicate with this
 * bdev.  This is how they get a channel. We are passed the same context we provided when
 * we created our PT vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our PT node.
 */
static struct spdk_io_channel *
vbdev_passthru_get_io_channel(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
	struct spdk_io_channel *pt_ch = NULL;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our pt_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	pt_ch = spdk_get_io_channel(pt_node);

	return pt_ch;
}
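
/*
 * Note on general SPDK channel behavior (not specific to this module): spdk_get_io_channel()
 * returns a per-thread channel for the io_device registered in vbdev_passthru_register();
 * the first call on a given thread invokes pt_bdev_ch_create_cb() below, and the final
 * spdk_put_io_channel() on that thread invokes pt_bdev_ch_destroy_cb().
 */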

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	spdk_json_write_name(w, "passthru");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
static int
vbdev_passthru_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node;

	TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
		const struct spdk_uuid *uuid = spdk_bdev_get_uuid(&pt_node->pt_bdev);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_passthru_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
		if (!spdk_uuid_is_null(uuid)) {
			spdk_json_write_named_uuid(w, "uuid", uuid);
		}
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}
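
/*
 * For illustration, each entry emitted above looks roughly like this (names hypothetical):
 *
 *	{
 *	  "method": "bdev_passthru_create",
 *	  "params": { "base_bdev_name": "Malloc0", "name": "TestPT" }
 *	}
 *
 * which is what allows a saved configuration to re-create the vbdev on the next startup.
 */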

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis.  If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;
	struct vbdev_passthru *pt_node = io_device;

	pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created it. If this bdev used its own poller, we'd unregister it here.
 */
static void
pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;

	spdk_put_io_channel(pt_ch->base_ch);
}

/* Create the passthru association from the bdev and vbdev name and insert
 * it on the global list. */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name,
			   const struct spdk_uuid *uuid)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	spdk_uuid_copy(&name->uuid, uuid);
	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just perform bdev module specific initialization. */
static int
vbdev_passthru_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* Whereas vbdev_passthru_config_json() is used to generate per-module JSON config data, this
 * function is called to output any per-bdev-specific methods. For the PT module, there are
 * none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

static int
vbdev_passthru_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* The passthru bdev doesn't touch the data buffers itself, so it supports any memory domain used by base_bdev */
	return spdk_bdev_get_memory_domains(pt_node->base_bdev, domains, array_size);
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct		= vbdev_passthru_destruct,
	.submit_request		= vbdev_passthru_submit_request,
	.io_type_supported	= vbdev_passthru_io_type_supported,
	.get_io_channel		= vbdev_passthru_get_io_channel,
	.dump_info_json		= vbdev_passthru_dump_info_json,
	.write_config_json	= vbdev_passthru_write_config_json,
	.get_memory_domains	= vbdev_passthru_get_memory_domains,
};

static void
vbdev_passthru_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_passthru *pt_node, *tmp;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event such as bdev removal. */
static void
vbdev_passthru_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_passthru_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either from the examine path or from the RPC method.
 */
static int
vbdev_passthru_register(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	struct spdk_bdev *bdev;
	struct spdk_uuid ns_uuid;
	int rc = 0;

	spdk_uuid_parse(&ns_uuid, BDEV_PASSTHRU_NAMESPACE_UUID);

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev_name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_passthru_base_bdev_event_cb,
					NULL, &pt_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("base bdev opened\n");

		bdev = spdk_bdev_desc_get_bdev(pt_node->base_desc);
		pt_node->base_bdev = bdev;

		if (!spdk_uuid_is_null(&name->uuid)) {
			/* Use the configured UUID */
			spdk_uuid_copy(&pt_node->pt_bdev.uuid, &name->uuid);
		} else {
			/* Generate UUID based on namespace UUID + base bdev UUID. */
			rc = spdk_uuid_generate_sha1(&pt_node->pt_bdev.uuid, &ns_uuid,
						     (const char *)&pt_node->base_bdev->uuid, sizeof(struct spdk_uuid));
			if (rc) {
				SPDK_ERRLOG("Unable to generate new UUID for passthru bdev\n");
				spdk_bdev_close(pt_node->base_desc);
				free(pt_node->pt_bdev.name);
				free(pt_node);
				break;
			}
		}

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;
		pt_node->pt_bdev.dif_pi_format = bdev->dif_pi_format;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our pt_bdev node here.
		 */
		pt_node->pt_bdev.ctxt = pt_node;
		pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
		pt_node->pt_bdev.module = &passthru_if;
		TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);

		spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
					sizeof(struct pt_io_channel),
					name->vbdev_name);
		SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);

		/* Save the thread where the base device is opened */
		pt_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", bdev_name);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev claimed\n");

		rc = spdk_bdev_register(&pt_node->pt_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register pt_bdev\n");
			spdk_bdev_module_release_bdev(&pt_node->pt_bdev);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("pt_bdev registered\n");
		SPDK_NOTICELOG("created pt_bdev for: %s\n", name->vbdev_name);
	}

	return rc;
}
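
/*
 * Typical flow from the RPC side (illustrative; exact rpc.py flags depend on your SPDK
 * version): a "bdev_passthru_create" RPC naming a base bdev such as "Malloc0" and a new
 * vbdev name such as "TestPT" lands in bdev_passthru_create_disk() below, and a
 * "bdev_passthru_delete" RPC for "TestPT" lands in bdev_passthru_delete_disk().
 */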

/* Create the passthru disk from the given bdev and vbdev name. */
int
bdev_passthru_create_disk(const char *bdev_name, const char *vbdev_name,
			  const struct spdk_uuid *uuid)
{
	int rc;

	/* Insert the bdev name into our global name list even if it doesn't exist yet;
	 * it may show up soon...
	 */
	rc = vbdev_passthru_insert_name(bdev_name, vbdev_name, uuid);
	if (rc) {
		return rc;
	}

	rc = vbdev_passthru_register(bdev_name);
	if (rc == -ENODEV) {
		/* This is not an error, we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}

void
bdev_passthru_delete_disk(const char *bdev_name, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_names *name;
	int rc;

	/* Some cleanup happens in the destruct callback. */
	rc = spdk_bdev_unregister_by_name(bdev_name, &passthru_if, cb_fn, cb_arg);
	if (rc == 0) {
		/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
		 * vbdev does not get re-created if the same bdev is constructed at some other time,
		 * unless the underlying bdev was hot-removed.
		 */
		TAILQ_FOREACH(name, &g_bdev_names, link) {
			if (strcmp(name->vbdev_name, bdev_name) == 0) {
				TAILQ_REMOVE(&g_bdev_names, name, link);
				free(name->bdev_name);
				free(name->vbdev_name);
				free(name);
				break;
			}
		}
	} else {
		cb_fn(cb_arg, rc);
	}
}

/* Because we specified this function as our module's examine_config callback when we
 * registered the module, we'll get this call anytime a new bdev shows up. Here we need
 * to decide if we care about it and if so what to do. We parsed the config file at init
 * so we check the new bdev against the list we built up at that time and, if the user
 * configured us to attach to this bdev, here's where we do it.
 */
static void
vbdev_passthru_examine(struct spdk_bdev *bdev)
{
	vbdev_passthru_register(bdev->name);

	spdk_bdev_module_examine_done(&passthru_if);
}
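
/*
 * Note: examine_config must always report completion, even when nothing in g_bdev_names
 * matched and no passthru bdev was created, which is why spdk_bdev_module_examine_done()
 * is called unconditionally above.
 */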

SPDK_LOG_REGISTER_COMPONENT(vbdev_passthru)