/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "vbdev_compress.h"

#include "spdk/reduce.h"
#include "spdk/stdinc.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/bdev_module.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/accel.h"

#include "spdk/accel_module.h"

#define CHUNK_SIZE (1024 * 16)
#define COMP_BDEV_NAME "compress"
#define BACKING_IO_SZ (4 * 1024)
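
/* With CHUNK_SIZE of 16 KiB and BACKING_IO_SZ of 4 KiB, each reduce chunk maps
 * to 16384 / 4096 = 4 backing IO units on the base bdev; these are the values
 * fed into spdk_reduce_vol_params in _prepare_for_load_init() below.
 */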

/* This namespace UUID was generated with uuid_generate(). */
#define BDEV_COMPRESS_NAMESPACE_UUID "c3fad6da-832f-4cc0-9cdc-5c552b225e7b"
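
/* For reference, a one-off namespace UUID like the one above can be generated
 * with libuuid from util-linux (an illustrative sketch, not part of this
 * module):
 *
 *	#include <uuid/uuid.h>
 *
 *	uuid_t u;
 *	char str[37];
 *
 *	uuid_generate(u);
 *	uuid_unparse_lower(u, str);
 */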

struct vbdev_comp_delete_ctx {
	spdk_delete_compress_complete	cb_fn;
	void				*cb_arg;
	int				cb_rc;
	struct spdk_thread		*orig_thread;
};

/* List of virtual bdevs and associated info for each. */
struct vbdev_compress {
	struct spdk_bdev		*base_bdev;	/* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc;	/* its descriptor we get from open */
	struct spdk_io_channel		*base_ch;	/* IO channel of base device */
	struct spdk_bdev		comp_bdev;	/* the compression virtual bdev */
	struct comp_io_channel		*comp_ch;	/* channel associated with this bdev */
	struct spdk_io_channel		*accel_channel;	/* to communicate with the accel framework */
	struct spdk_thread		*reduce_thread;
	pthread_mutex_t			reduce_lock;
	uint32_t			ch_count;
	TAILQ_HEAD(, spdk_bdev_io)	pending_comp_ios;	/* outstanding operations to a comp library */
	struct spdk_poller		*poller;	/* completion poller */
	struct spdk_reduce_vol_params	params;		/* params for the reduce volume */
	struct spdk_reduce_backing_dev	backing_dev;	/* backing device info for the reduce volume */
	struct spdk_reduce_vol		*vol;		/* the reduce volume */
	struct vbdev_comp_delete_ctx	*delete_ctx;
	bool				orphaned;	/* base bdev claimed but comp_bdev not registered */
	int				reduce_errno;
	TAILQ_HEAD(, vbdev_comp_op)	queued_comp_ops;
	TAILQ_ENTRY(vbdev_compress)	link;
	struct spdk_thread		*thread;	/* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_compress) g_vbdev_comp = TAILQ_HEAD_INITIALIZER(g_vbdev_comp);

/* The comp vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 */
struct comp_io_channel {
	struct spdk_io_channel_iter	*iter;	/* used with for_each_channel in reset */
};

/* Per I/O context for the compression vbdev. */
struct comp_bdev_io {
	struct comp_io_channel		*comp_ch;		/* used in completion handling */
	struct vbdev_compress		*comp_bdev;		/* vbdev associated with this IO */
	struct spdk_bdev_io_wait_entry	bdev_io_wait;		/* for bdev_io_wait */
	struct spdk_bdev_io		*orig_io;		/* the original IO */
	struct spdk_io_channel		*ch;			/* for resubmission */
	int				status;			/* save for completion on orig thread */
};

static void vbdev_compress_examine(struct spdk_bdev *bdev);
static int vbdev_compress_claim(struct vbdev_compress *comp_bdev);
static void vbdev_compress_queue_io(struct spdk_bdev_io *bdev_io);
struct vbdev_compress *_prepare_for_load_init(struct spdk_bdev_desc *bdev_desc, uint32_t lb_size);
static void vbdev_compress_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
static void comp_bdev_ch_destroy_cb(void *io_device, void *ctx_buf);
static void vbdev_compress_delete_done(void *cb_arg, int bdeverrno);

/* for completing rw requests on the orig IO thread. */
static void
_reduce_rw_blocks_cb(void *arg)
{
	struct comp_bdev_io *io_ctx = arg;

	if (spdk_likely(io_ctx->status == 0)) {
		spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else if (io_ctx->status == -ENOMEM) {
		vbdev_compress_queue_io(spdk_bdev_io_from_ctx(io_ctx));
	} else {
		SPDK_ERRLOG("Failed to execute reduce API: %s\n", spdk_strerror(-io_ctx->status));
		spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Completion callback for reads/writes that were issued via reducelib. */
static void
reduce_rw_blocks_cb(void *arg, int reduce_errno)
{
	struct spdk_bdev_io *bdev_io = arg;
	struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx;
	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(io_ctx->comp_ch);
	struct spdk_thread *orig_thread;

	/* TODO: need to decide which error codes are bdev_io success vs failure;
	 * example examine calls reading metadata */

	io_ctx->status = reduce_errno;

	/* Send this request to the orig IO thread. */
	orig_thread = spdk_io_channel_get_thread(ch);

	spdk_thread_exec_msg(orig_thread, _reduce_rw_blocks_cb, io_ctx);
}
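
/* spdk_thread_exec_msg() above runs the callback inline if we are already on
 * the target thread and otherwise falls back to message passing; a sketch of
 * the equivalent open-coded pattern:
 *
 *	if (orig_thread == spdk_get_thread()) {
 *		_reduce_rw_blocks_cb(io_ctx);
 *	} else {
 *		spdk_thread_send_msg(orig_thread, _reduce_rw_blocks_cb, io_ctx);
 *	}
 */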

static int
_compress_operation(struct spdk_reduce_backing_dev *backing_dev, struct iovec *src_iovs,
		    int src_iovcnt, struct iovec *dst_iovs,
		    int dst_iovcnt, bool compress, void *cb_arg)
{
	struct spdk_reduce_vol_cb_args *reduce_cb_arg = cb_arg;
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(backing_dev, struct vbdev_compress,
					   backing_dev);
	int rc;

	if (compress) {
		assert(dst_iovcnt == 1);
		rc = spdk_accel_submit_compress(comp_bdev->accel_channel, dst_iovs[0].iov_base, dst_iovs[0].iov_len,
						src_iovs, src_iovcnt, &reduce_cb_arg->output_size,
						reduce_cb_arg->cb_fn, reduce_cb_arg->cb_arg);
	} else {
		rc = spdk_accel_submit_decompress(comp_bdev->accel_channel, dst_iovs, dst_iovcnt,
						  src_iovs, src_iovcnt, &reduce_cb_arg->output_size,
						  reduce_cb_arg->cb_fn, reduce_cb_arg->cb_arg);
	}

	return rc;
}

/* Entry point for reduce lib to issue a compress operation. */
static void
_comp_reduce_compress(struct spdk_reduce_backing_dev *dev,
		      struct iovec *src_iovs, int src_iovcnt,
		      struct iovec *dst_iovs, int dst_iovcnt,
		      struct spdk_reduce_vol_cb_args *cb_arg)
{
	int rc;

	rc = _compress_operation(dev, src_iovs, src_iovcnt, dst_iovs, dst_iovcnt, true, cb_arg);
	if (rc) {
		SPDK_ERRLOG("error submitting compress operation, rc %d (%s)\n", rc, spdk_strerror(-rc));
		cb_arg->cb_fn(cb_arg->cb_arg, rc);
	}
}

/* Entry point for reduce lib to issue a decompress operation. */
static void
_comp_reduce_decompress(struct spdk_reduce_backing_dev *dev,
			struct iovec *src_iovs, int src_iovcnt,
			struct iovec *dst_iovs, int dst_iovcnt,
			struct spdk_reduce_vol_cb_args *cb_arg)
{
	int rc;

	rc = _compress_operation(dev, src_iovs, src_iovcnt, dst_iovs, dst_iovcnt, false, cb_arg);
	if (rc) {
		SPDK_ERRLOG("error submitting decompress operation, rc %d (%s)\n", rc, spdk_strerror(-rc));
		cb_arg->cb_fn(cb_arg->cb_arg, rc);
	}
}

static void
_comp_submit_write(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress,
					   comp_bdev);

	spdk_reduce_vol_writev(comp_bdev->vol, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
			       bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
			       reduce_rw_blocks_cb, bdev_io);
}

static void
_comp_submit_read(void *ctx)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress,
					   comp_bdev);

	spdk_reduce_vol_readv(comp_bdev->vol, bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
			      bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
			      reduce_rw_blocks_cb, bdev_io);
}

/* Callback for getting a buf from the bdev pool. If the caller passed in NULL,
 * we need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it.
 */
static void
comp_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress,
					   comp_bdev);

	if (spdk_unlikely(!success)) {
		SPDK_ERRLOG("Failed to get data buffer\n");
		reduce_rw_blocks_cb(bdev_io, -ENOMEM);
		return;
	}

	spdk_thread_exec_msg(comp_bdev->reduce_thread, _comp_submit_read, bdev_io);
}

/* Called when someone above submits IO to this vbdev. */
static void
vbdev_compress_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx;
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_compress,
					   comp_bdev);
	struct comp_io_channel *comp_ch = spdk_io_channel_get_ctx(ch);

	memset(io_ctx, 0, sizeof(struct comp_bdev_io));
	io_ctx->comp_bdev = comp_bdev;
	io_ctx->comp_ch = comp_ch;
	io_ctx->orig_io = bdev_io;
	io_ctx->ch = ch;	/* saved so a queued IO can be resubmitted on this channel */

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, comp_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		return;
	case SPDK_BDEV_IO_TYPE_WRITE:
		spdk_thread_exec_msg(comp_bdev->reduce_thread, _comp_submit_write, bdev_io);
		return;
	/* TODO: support RESET in a future patch in the series */
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	default:
		SPDK_ERRLOG("Unsupported I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(io_ctx->orig_io, SPDK_BDEV_IO_STATUS_FAILED);
		break;
	}
}

static bool
vbdev_compress_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx;

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
		return spdk_bdev_io_type_supported(comp_bdev->base_bdev, io_type);
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	default:
		return false;
	}
}

/* Resubmission function used by the bdev layer when a queued IO is ready to be
 * submitted.
 */
static void
vbdev_compress_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx;

	vbdev_compress_submit_request(io_ctx->ch, bdev_io);
}

/* Used to queue an IO in the event of resource issues. */
static void
vbdev_compress_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct comp_bdev_io *io_ctx = (struct comp_bdev_io *)bdev_io->driver_ctx;
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_compress_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, io_ctx->comp_bdev->base_ch, &io_ctx->bdev_io_wait);
	if (rc) {
		SPDK_ERRLOG("Queue io failed in vbdev_compress_queue_io, rc=%d.\n", rc);
		assert(false);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_compress *comp_bdev = io_device;

	/* Done with this comp_bdev. */
	pthread_mutex_destroy(&comp_bdev->reduce_lock);
	free(comp_bdev->comp_bdev.name);
	free(comp_bdev);
}

static void
_vbdev_compress_destruct_cb(void *ctx)
{
	struct vbdev_compress *comp_bdev = ctx;

	TAILQ_REMOVE(&g_vbdev_comp, comp_bdev, link);
	spdk_bdev_module_release_bdev(comp_bdev->base_bdev);
	/* Close the underlying bdev on its same opened thread. */
	spdk_bdev_close(comp_bdev->base_desc);
	comp_bdev->vol = NULL;
	if (comp_bdev->orphaned == false) {
		spdk_io_device_unregister(comp_bdev, _device_unregister_cb);
	} else {
		vbdev_compress_delete_done(comp_bdev->delete_ctx, 0);
		_device_unregister_cb(comp_bdev);
	}
}

static void
vbdev_compress_destruct_cb(void *cb_arg, int reduce_errno)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg;

	if (reduce_errno) {
		SPDK_ERRLOG("reduce_errno %d\n", reduce_errno);
	} else {
		if (comp_bdev->thread && comp_bdev->thread != spdk_get_thread()) {
			spdk_thread_send_msg(comp_bdev->thread,
					     _vbdev_compress_destruct_cb, comp_bdev);
		} else {
			_vbdev_compress_destruct_cb(comp_bdev);
		}
	}
}

static void
_reduce_destroy_cb(void *ctx, int reduce_errno)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx;

	if (reduce_errno) {
		SPDK_ERRLOG("reduce_errno %d\n", reduce_errno);
	}

	comp_bdev->vol = NULL;
	spdk_put_io_channel(comp_bdev->base_ch);
	if (comp_bdev->orphaned == false) {
		spdk_bdev_unregister(&comp_bdev->comp_bdev, vbdev_compress_delete_done,
				     comp_bdev->delete_ctx);
	} else {
		vbdev_compress_destruct_cb((void *)comp_bdev, 0);
	}
}

static void
_delete_vol_unload_cb(void *ctx)
{
	struct vbdev_compress *comp_bdev = ctx;

	/* FIXME: Assert if these conditions are not satisfied for now. */
	assert(!comp_bdev->reduce_thread ||
	       comp_bdev->reduce_thread == spdk_get_thread());

	/* reducelib needs a channel to communicate with the backing device */
	comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc);

	/* Clean the device before we free our resources. */
	spdk_reduce_vol_destroy(&comp_bdev->backing_dev, _reduce_destroy_cb, comp_bdev);
}

/* Called by reduceLib after performing unload vol actions */
static void
delete_vol_unload_cb(void *cb_arg, int reduce_errno)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg;

	if (reduce_errno) {
		SPDK_ERRLOG("reduce_errno %d\n", reduce_errno);
		/* FIXME: callback should be executed. */
		return;
	}

	pthread_mutex_lock(&comp_bdev->reduce_lock);
	if (comp_bdev->reduce_thread && comp_bdev->reduce_thread != spdk_get_thread()) {
		spdk_thread_send_msg(comp_bdev->reduce_thread,
				     _delete_vol_unload_cb, comp_bdev);
		pthread_mutex_unlock(&comp_bdev->reduce_lock);
	} else {
		pthread_mutex_unlock(&comp_bdev->reduce_lock);

		_delete_vol_unload_cb(comp_bdev);
	}
}

const char *
compress_get_name(const struct vbdev_compress *comp_bdev)
{
	return comp_bdev->comp_bdev.name;
}

struct vbdev_compress *
compress_bdev_first(void)
{
	struct vbdev_compress *comp_bdev;

	comp_bdev = TAILQ_FIRST(&g_vbdev_comp);

	return comp_bdev;
}

struct vbdev_compress *
compress_bdev_next(struct vbdev_compress *prev)
{
	struct vbdev_compress *comp_bdev;

	comp_bdev = TAILQ_NEXT(prev, link);

	return comp_bdev;
}

bool
compress_has_orphan(const char *name)
{
	struct vbdev_compress *comp_bdev;

	TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) {
		if (comp_bdev->orphaned && strcmp(name, comp_bdev->comp_bdev.name) == 0) {
			return true;
		}
	}
	return false;
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_compress_destruct(void *ctx)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx;

	if (comp_bdev->vol != NULL) {
		/* Tell reducelib that we're done with this volume. */
		spdk_reduce_vol_unload(comp_bdev->vol, vbdev_compress_destruct_cb, comp_bdev);
	} else {
		vbdev_compress_destruct_cb(comp_bdev, 0);
	}

	return 0;
}

/* We supplied this as an entry point for upper layers that want to communicate
 * with this bdev. This is how they get a channel.
 */
static struct spdk_io_channel *
vbdev_compress_get_io_channel(void *ctx)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our comp_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	return spdk_get_io_channel(comp_bdev);
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_compress_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)ctx;

	spdk_json_write_name(w, "compress");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&comp_bdev->comp_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(comp_bdev->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}
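
/* For illustration, the fragment written above ends up inside bdev_get_bdevs()
 * output roughly as follows (assuming a base bdev named "Nvme0n1"):
 *
 *	"compress": {
 *		"name": "COMP_Nvme0n1",
 *		"base_bdev_name": "Nvme0n1"
 *	}
 */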

static int
vbdev_compress_config_json(struct spdk_json_write_ctx *w)
{
	/* Nothing to dump as compress bdev configuration is saved on physical device. */
	return 0;
}

static void
_vbdev_reduce_init_cb(void *ctx)
{
	struct vbdev_compress *meta_ctx = ctx;
	int rc;

	assert(meta_ctx->base_desc != NULL);

	/* We're done with metadata operations */
	spdk_put_io_channel(meta_ctx->base_ch);

	if (meta_ctx->vol) {
		rc = vbdev_compress_claim(meta_ctx);
		if (rc == 0) {
			return;
		}
	}

	/* Close the underlying bdev on its same opened thread. */
	spdk_bdev_close(meta_ctx->base_desc);
	free(meta_ctx);
}

/* Callback from reduce for when init is complete. We'll pass the vbdev_comp struct
 * used for initial metadata operations to claim, where it will be further filled out
 * and added to the global list.
 */
static void
vbdev_reduce_init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	struct vbdev_compress *meta_ctx = cb_arg;

	if (reduce_errno == 0) {
		meta_ctx->vol = vol;
	} else {
		SPDK_ERRLOG("for vol %s, error %d\n",
			    spdk_bdev_get_name(meta_ctx->base_bdev), reduce_errno);
	}

	if (meta_ctx->thread && meta_ctx->thread != spdk_get_thread()) {
		spdk_thread_send_msg(meta_ctx->thread, _vbdev_reduce_init_cb, meta_ctx);
	} else {
		_vbdev_reduce_init_cb(meta_ctx);
	}
}

/* Callback for the function used by reduceLib to perform IO to/from the backing device. We just
 * call the callback provided by reduceLib when it called the read/write/unmap function and
 * free the bdev_io.
 */
static void
comp_reduce_io_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
	struct spdk_reduce_vol_cb_args *cb_args = arg;
	int reduce_errno;

	if (success) {
		reduce_errno = 0;
	} else {
		reduce_errno = -EIO;
	}
	spdk_bdev_free_io(bdev_io);
	cb_args->cb_fn(cb_args->cb_arg, reduce_errno);
}

/* This is the function provided to the reduceLib for sending reads directly to
 * the backing device.
 */
static void
_comp_reduce_readv(struct spdk_reduce_backing_dev *dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(dev, struct vbdev_compress,
					   backing_dev);
	int rc;

	rc = spdk_bdev_readv_blocks(comp_bdev->base_desc, comp_bdev->base_ch,
				    iov, iovcnt, lba, lba_count,
				    comp_reduce_io_cb,
				    args);
	if (rc) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io.\n");
			/* TODO: there's no bdev_io to queue */
		} else {
			SPDK_ERRLOG("error submitting readv request\n");
		}
		args->cb_fn(args->cb_arg, rc);
	}
}

/* This is the function provided to the reduceLib for sending writes directly to
 * the backing device.
 */
static void
_comp_reduce_writev(struct spdk_reduce_backing_dev *dev, struct iovec *iov, int iovcnt,
		    uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(dev, struct vbdev_compress,
					   backing_dev);
	int rc;

	rc = spdk_bdev_writev_blocks(comp_bdev->base_desc, comp_bdev->base_ch,
				     iov, iovcnt, lba, lba_count,
				     comp_reduce_io_cb,
				     args);
	if (rc) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io.\n");
			/* TODO: there's no bdev_io to queue */
		} else {
			SPDK_ERRLOG("error submitting writev request\n");
		}
		args->cb_fn(args->cb_arg, rc);
	}
}

/* This is the function provided to the reduceLib for sending unmaps directly to
 * the backing device.
 */
static void
_comp_reduce_unmap(struct spdk_reduce_backing_dev *dev,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	struct vbdev_compress *comp_bdev = SPDK_CONTAINEROF(dev, struct vbdev_compress,
					   backing_dev);
	int rc;

	rc = spdk_bdev_unmap_blocks(comp_bdev->base_desc, comp_bdev->base_ch,
				    lba, lba_count,
				    comp_reduce_io_cb,
				    args);

	if (rc) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io.\n");
			/* TODO: there's no bdev_io to queue */
		} else {
			SPDK_ERRLOG("error submitting unmap request\n");
		}
		args->cb_fn(args->cb_arg, rc);
	}
}

/* Called by reduceLib after performing unload vol actions following base bdev hotremove */
static void
bdev_hotremove_vol_unload_cb(void *cb_arg, int reduce_errno)
{
	struct vbdev_compress *comp_bdev = (struct vbdev_compress *)cb_arg;

	if (reduce_errno) {
		SPDK_ERRLOG("reduce_errno %d\n", reduce_errno);
	}

	comp_bdev->vol = NULL;
	spdk_bdev_unregister(&comp_bdev->comp_bdev, NULL, NULL);
}

static void
vbdev_compress_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_compress *comp_bdev, *tmp;

	TAILQ_FOREACH_SAFE(comp_bdev, &g_vbdev_comp, link, tmp) {
		if (bdev_find == comp_bdev->base_bdev) {
			/* Tell reduceLib that we're done with this volume. */
			spdk_reduce_vol_unload(comp_bdev->vol, bdev_hotremove_vol_unload_cb, comp_bdev);
		}
	}
}

/* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */
static void
vbdev_compress_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_compress_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* TODO: determine which params we want user configurable; hardcoded for now
 * params.vol_size
 * params.chunk_size
 * compression PMD, algorithm, window size, comp level, etc.
 * DEV_MD_PATH
 */

/* Common function for init and load to allocate and populate the minimal
 * information for reducelib to init or load.
 */
struct vbdev_compress *
_prepare_for_load_init(struct spdk_bdev_desc *bdev_desc, uint32_t lb_size)
{
	struct vbdev_compress *meta_ctx;
	struct spdk_bdev *bdev;

	meta_ctx = calloc(1, sizeof(struct vbdev_compress));
	if (meta_ctx == NULL) {
		SPDK_ERRLOG("failed to alloc init context\n");
		return NULL;
	}

	meta_ctx->backing_dev.unmap = _comp_reduce_unmap;
	meta_ctx->backing_dev.readv = _comp_reduce_readv;
	meta_ctx->backing_dev.writev = _comp_reduce_writev;
	meta_ctx->backing_dev.compress = _comp_reduce_compress;
	meta_ctx->backing_dev.decompress = _comp_reduce_decompress;

	meta_ctx->base_desc = bdev_desc;
	bdev = spdk_bdev_desc_get_bdev(bdev_desc);
	meta_ctx->base_bdev = bdev;

	meta_ctx->backing_dev.blocklen = bdev->blocklen;
	meta_ctx->backing_dev.blockcnt = bdev->blockcnt;

	meta_ctx->params.chunk_size = CHUNK_SIZE;
	if (lb_size == 0) {
		meta_ctx->params.logical_block_size = bdev->blocklen;
	} else {
		meta_ctx->params.logical_block_size = lb_size;
	}

	meta_ctx->params.backing_io_unit_size = BACKING_IO_SZ;
	return meta_ctx;
}

/* Call reducelib to initialize a new volume */
static int
vbdev_init_reduce(const char *bdev_name, const char *pm_path, uint32_t lb_size)
{
	struct spdk_bdev_desc *bdev_desc = NULL;
	struct vbdev_compress *meta_ctx;
	int rc;

	rc = spdk_bdev_open_ext(bdev_name, true, vbdev_compress_base_bdev_event_cb,
				NULL, &bdev_desc);
	if (rc) {
		SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
		return rc;
	}

	meta_ctx = _prepare_for_load_init(bdev_desc, lb_size);
	if (meta_ctx == NULL) {
		spdk_bdev_close(bdev_desc);
		return -EINVAL;
	}

	/* Save the thread where the base device is opened */
	meta_ctx->thread = spdk_get_thread();

	meta_ctx->base_ch = spdk_bdev_get_io_channel(meta_ctx->base_desc);

	spdk_reduce_vol_init(&meta_ctx->params, &meta_ctx->backing_dev,
			     pm_path,
			     vbdev_reduce_init_cb,
			     meta_ctx);
	return 0;
}

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
comp_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct vbdev_compress *comp_bdev = io_device;

	/* Now set the reduce channel if it's not already set. */
	pthread_mutex_lock(&comp_bdev->reduce_lock);
	if (comp_bdev->ch_count == 0) {
		/* We use this queue to track outstanding IO in our layer. */
		TAILQ_INIT(&comp_bdev->pending_comp_ios);

		/* We use this to queue up compression operations as needed. */
		TAILQ_INIT(&comp_bdev->queued_comp_ops);

		comp_bdev->base_ch = spdk_bdev_get_io_channel(comp_bdev->base_desc);
		comp_bdev->reduce_thread = spdk_get_thread();
		comp_bdev->accel_channel = spdk_accel_get_io_channel();
	}
	comp_bdev->ch_count++;
	pthread_mutex_unlock(&comp_bdev->reduce_lock);

	return 0;
}

static void
_channel_cleanup(struct vbdev_compress *comp_bdev)
{
	spdk_put_io_channel(comp_bdev->base_ch);
	spdk_put_io_channel(comp_bdev->accel_channel);
	comp_bdev->reduce_thread = NULL;
}

/* Used to reroute destroy_ch to the correct thread */
static void
_comp_bdev_ch_destroy_cb(void *arg)
{
	struct vbdev_compress *comp_bdev = arg;

	pthread_mutex_lock(&comp_bdev->reduce_lock);
	_channel_cleanup(comp_bdev);
	pthread_mutex_unlock(&comp_bdev->reduce_lock);
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created. If this bdev used its own poller, we'd unregister it here.
 */
static void
comp_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct vbdev_compress *comp_bdev = io_device;

	pthread_mutex_lock(&comp_bdev->reduce_lock);
	comp_bdev->ch_count--;
	if (comp_bdev->ch_count == 0) {
		/* Send this request to the thread where the channel was created. */
		if (comp_bdev->reduce_thread != spdk_get_thread()) {
			spdk_thread_send_msg(comp_bdev->reduce_thread,
					     _comp_bdev_ch_destroy_cb, comp_bdev);
		} else {
			_channel_cleanup(comp_bdev);
		}
	}
	pthread_mutex_unlock(&comp_bdev->reduce_lock);
}

/* RPC entry point for compression vbdev creation. */
int
create_compress_bdev(const char *bdev_name, const char *pm_path, uint32_t lb_size)
{
	struct vbdev_compress *comp_bdev = NULL;

	if ((lb_size != 0) && (lb_size != LB_SIZE_4K) && (lb_size != LB_SIZE_512B)) {
		SPDK_ERRLOG("Logical block size must be 512 or 4096\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) {
		if (strcmp(bdev_name, comp_bdev->base_bdev->name) == 0) {
			SPDK_ERRLOG("Base bdev %s is already being used for a compress bdev\n", bdev_name);
			return -EBUSY;
		}
	}
	return vbdev_init_reduce(bdev_name, pm_path, lb_size);
}
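
/* A quick way to exercise this entry point is through its RPC wiring (a
 * sketch; the exact flags live in vbdev_compress_rpc.c and scripts/rpc.py,
 * and the bdev name and pmem path below are illustrative):
 *
 *	scripts/rpc.py bdev_compress_create -b Nvme0n1 -p /pmem_files -l 4096
 */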

static int
vbdev_compress_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_compress_finish(void)
{
	/* TODO: unload vol in a future patch */
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_compress_get_ctx_size(void)
{
	return sizeof(struct comp_bdev_io);
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_compress_fn_table = {
	.destruct		= vbdev_compress_destruct,
	.submit_request		= vbdev_compress_submit_request,
	.io_type_supported	= vbdev_compress_io_type_supported,
	.get_io_channel		= vbdev_compress_get_io_channel,
	.dump_info_json		= vbdev_compress_dump_info_json,
	.write_config_json	= NULL,
};

static struct spdk_bdev_module compress_if = {
	.name = "compress",
	.module_init = vbdev_compress_init,
	.get_ctx_size = vbdev_compress_get_ctx_size,
	.examine_disk = vbdev_compress_examine,
	.module_fini = vbdev_compress_finish,
	.config_json = vbdev_compress_config_json
};

SPDK_BDEV_MODULE_REGISTER(compress, &compress_if)

static int
_set_compbdev_name(struct vbdev_compress *comp_bdev)
{
	struct spdk_bdev_alias *aliases;

	if (!TAILQ_EMPTY(spdk_bdev_get_aliases(comp_bdev->base_bdev))) {
		aliases = TAILQ_FIRST(spdk_bdev_get_aliases(comp_bdev->base_bdev));
		comp_bdev->comp_bdev.name = spdk_sprintf_alloc("COMP_%s", aliases->alias.name);
		if (!comp_bdev->comp_bdev.name) {
			SPDK_ERRLOG("could not allocate comp_bdev name for alias\n");
			return -ENOMEM;
		}
	} else {
		comp_bdev->comp_bdev.name = spdk_sprintf_alloc("COMP_%s", comp_bdev->base_bdev->name);
		if (!comp_bdev->comp_bdev.name) {
			SPDK_ERRLOG("could not allocate comp_bdev name for unique name\n");
			return -ENOMEM;
		}
	}
	return 0;
}

static int
vbdev_compress_claim(struct vbdev_compress *comp_bdev)
{
	struct spdk_uuid ns_uuid;
	int rc;

	if (_set_compbdev_name(comp_bdev)) {
		return -EINVAL;
	}

	/* Note: some of the fields below will change in the future - for example,
	 * blockcnt specifically will not match (the compressed volume size will
	 * be slightly less than the base bdev size)
	 */
	comp_bdev->comp_bdev.product_name = COMP_BDEV_NAME;
	comp_bdev->comp_bdev.write_cache = comp_bdev->base_bdev->write_cache;

	comp_bdev->comp_bdev.optimal_io_boundary =
		comp_bdev->params.chunk_size / comp_bdev->params.logical_block_size;
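	/* For example, with the 16 KiB CHUNK_SIZE above this works out to
	 * 16384 / 4096 = 4 logical blocks for a 4K volume, or 16384 / 512 = 32
	 * for a 512B one, so IO gets split on reduce chunk boundaries. */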

	comp_bdev->comp_bdev.split_on_optimal_io_boundary = true;

	comp_bdev->comp_bdev.blocklen = comp_bdev->params.logical_block_size;
	comp_bdev->comp_bdev.blockcnt = comp_bdev->params.vol_size / comp_bdev->comp_bdev.blocklen;
	assert(comp_bdev->comp_bdev.blockcnt > 0);

	/* This is the context that is passed to us when the bdev
	 * layer calls in so we'll save our comp_bdev node here.
	 */
	comp_bdev->comp_bdev.ctxt = comp_bdev;
	comp_bdev->comp_bdev.fn_table = &vbdev_compress_fn_table;
	comp_bdev->comp_bdev.module = &compress_if;

	/* Generate UUID based on namespace UUID + base bdev UUID. */
	spdk_uuid_parse(&ns_uuid, BDEV_COMPRESS_NAMESPACE_UUID);
	rc = spdk_uuid_generate_sha1(&comp_bdev->comp_bdev.uuid, &ns_uuid,
				     (const char *)&comp_bdev->base_bdev->uuid, sizeof(struct spdk_uuid));
	if (rc) {
		SPDK_ERRLOG("Unable to generate new UUID for compress bdev\n");
		return -EINVAL;
	}
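
	/* The SHA1-based generation above is deterministic (RFC 4122 name-based
	 * UUID): the same namespace and base bdev UUID yield the same compress
	 * bdev UUID across restarts. */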

	pthread_mutex_init(&comp_bdev->reduce_lock, NULL);

	/* Save the thread where the base device is opened */
	comp_bdev->thread = spdk_get_thread();

	spdk_io_device_register(comp_bdev, comp_bdev_ch_create_cb, comp_bdev_ch_destroy_cb,
				sizeof(struct comp_io_channel),
				comp_bdev->comp_bdev.name);

	rc = spdk_bdev_module_claim_bdev(comp_bdev->base_bdev, comp_bdev->base_desc,
					 comp_bdev->comp_bdev.module);
	if (rc) {
		SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(comp_bdev->base_bdev));
		goto error_claim;
	}

	rc = spdk_bdev_register(&comp_bdev->comp_bdev);
	if (rc < 0) {
		SPDK_ERRLOG("failed to register bdev\n");
		goto error_bdev_register;
	}

	TAILQ_INSERT_TAIL(&g_vbdev_comp, comp_bdev, link);

	SPDK_NOTICELOG("registered io_device and virtual bdev for: %s\n", comp_bdev->comp_bdev.name);

	return 0;

	/* Error cleanup paths. */
error_bdev_register:
	spdk_bdev_module_release_bdev(comp_bdev->base_bdev);
error_claim:
	spdk_io_device_unregister(comp_bdev, NULL);
	free(comp_bdev->comp_bdev.name);
	return rc;
}

static void
_vbdev_compress_delete_done(void *_ctx)
{
	struct vbdev_comp_delete_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, ctx->cb_rc);

	free(ctx);
}

static void
vbdev_compress_delete_done(void *cb_arg, int bdeverrno)
{
	struct vbdev_comp_delete_ctx *ctx = cb_arg;

	ctx->cb_rc = bdeverrno;

	if (ctx->orig_thread != spdk_get_thread()) {
		spdk_thread_send_msg(ctx->orig_thread, _vbdev_compress_delete_done, ctx);
	} else {
		_vbdev_compress_delete_done(ctx);
	}
}

void
bdev_compress_delete(const char *name, spdk_delete_compress_complete cb_fn, void *cb_arg)
{
	struct vbdev_compress *comp_bdev = NULL;
	struct vbdev_comp_delete_ctx *ctx;

	TAILQ_FOREACH(comp_bdev, &g_vbdev_comp, link) {
		if (strcmp(name, comp_bdev->comp_bdev.name) == 0) {
			break;
		}
	}

	if (comp_bdev == NULL) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Failed to allocate delete context\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Save these for after the vol is destroyed. */
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->orig_thread = spdk_get_thread();

	comp_bdev->delete_ctx = ctx;

	/* Tell reducelib that we're done with this volume. */
	if (comp_bdev->orphaned == false) {
		spdk_reduce_vol_unload(comp_bdev->vol, delete_vol_unload_cb, comp_bdev);
	} else {
		delete_vol_unload_cb(comp_bdev, 0);
	}
}
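
/* Deletion is likewise normally driven via RPC (a sketch; the compress bdev
 * name is illustrative):
 *
 *	scripts/rpc.py bdev_compress_delete COMP_Nvme0n1
 */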

static void
_vbdev_reduce_load_cb(void *ctx)
{
	struct vbdev_compress *meta_ctx = ctx;
	int rc;

	assert(meta_ctx->base_desc != NULL);

	/* Done with metadata operations */
	spdk_put_io_channel(meta_ctx->base_ch);

	if (meta_ctx->reduce_errno == 0) {
		rc = vbdev_compress_claim(meta_ctx);
		if (rc != 0) {
			goto err;
		}
	} else if (meta_ctx->reduce_errno == -ENOENT) {
		if (_set_compbdev_name(meta_ctx)) {
			goto err;
		}

		/* Save the thread where the base device is opened */
		meta_ctx->thread = spdk_get_thread();

		meta_ctx->comp_bdev.module = &compress_if;
		pthread_mutex_init(&meta_ctx->reduce_lock, NULL);
		rc = spdk_bdev_module_claim_bdev(meta_ctx->base_bdev, meta_ctx->base_desc,
						 meta_ctx->comp_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(meta_ctx->base_bdev));
			free(meta_ctx->comp_bdev.name);
			goto err;
		}

		meta_ctx->orphaned = true;
		TAILQ_INSERT_TAIL(&g_vbdev_comp, meta_ctx, link);
	} else {
		if (meta_ctx->reduce_errno != -EILSEQ) {
			SPDK_ERRLOG("for vol %s, error %d\n",
				    spdk_bdev_get_name(meta_ctx->base_bdev), meta_ctx->reduce_errno);
		}
		goto err;
	}

	spdk_bdev_module_examine_done(&compress_if);
	return;

err:
	/* Close the underlying bdev on its same opened thread. */
	spdk_bdev_close(meta_ctx->base_desc);
	free(meta_ctx);
	spdk_bdev_module_examine_done(&compress_if);
}

/* Callback from reduce for when load is complete. We'll pass the vbdev_comp struct
 * used for initial metadata operations to claim, where it will be further filled out
 * and added to the global list.
 */
static void
vbdev_reduce_load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	struct vbdev_compress *meta_ctx = cb_arg;

	if (reduce_errno == 0) {
		/* Update information following volume load. */
		meta_ctx->vol = vol;
		memcpy(&meta_ctx->params, spdk_reduce_vol_get_params(vol),
		       sizeof(struct spdk_reduce_vol_params));
	}

	meta_ctx->reduce_errno = reduce_errno;

	if (meta_ctx->thread && meta_ctx->thread != spdk_get_thread()) {
		spdk_thread_send_msg(meta_ctx->thread, _vbdev_reduce_load_cb, meta_ctx);
	} else {
		_vbdev_reduce_load_cb(meta_ctx);
	}
}

/* Examine_disk entry point: will do a metadata load to see if this is ours,
 * and if so will go ahead and claim it.
 */
static void
vbdev_compress_examine(struct spdk_bdev *bdev)
{
	struct spdk_bdev_desc *bdev_desc = NULL;
	struct vbdev_compress *meta_ctx;
	int rc;

	if (strcmp(bdev->product_name, COMP_BDEV_NAME) == 0) {
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false,
				vbdev_compress_base_bdev_event_cb, NULL, &bdev_desc);
	if (rc) {
		SPDK_ERRLOG("could not open bdev %s\n", spdk_bdev_get_name(bdev));
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	meta_ctx = _prepare_for_load_init(bdev_desc, 0);
	if (meta_ctx == NULL) {
		spdk_bdev_close(bdev_desc);
		spdk_bdev_module_examine_done(&compress_if);
		return;
	}

	/* Save the thread where the base device is opened */
	meta_ctx->thread = spdk_get_thread();

	meta_ctx->base_ch = spdk_bdev_get_io_channel(meta_ctx->base_desc);
	spdk_reduce_vol_load(&meta_ctx->backing_dev, vbdev_reduce_load_cb, meta_ctx);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_compress)