xref: /spdk/module/bdev/malloc/bdev_malloc.c (revision 1a526000d070b65d326b2d442fb259497b561188)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "bdev_malloc.h"
10 #include "spdk/endian.h"
11 #include "spdk/env.h"
12 #include "spdk/accel.h"
13 #include "spdk/string.h"
14 
15 #include "spdk/log.h"
16 
/* Per-bdev state for one malloc disk: the bdev object plus the pinned
 * data buffer and the optional separate metadata buffer backing it. */
struct malloc_disk {
	struct spdk_bdev		disk;
	void				*malloc_buf;	/* data buffer covering the whole bdev */
	void				*malloc_md_buf;	/* separate md buffer; NULL when md is interleaved or absent */
	TAILQ_ENTRY(malloc_disk)	link;		/* entry in g_malloc_disks */
};
23 
/* Per-I/O context, carved out of spdk_bdev_io->driver_ctx. */
struct malloc_task {
	int				num_outstanding;	/* accel operations still in flight for this I/O */
	enum spdk_bdev_io_status	status;
	TAILQ_ENTRY(malloc_task)	tailq;			/* entry in malloc_channel completed_tasks */
};
29 
/* Per-channel state: an accel channel for data movement plus a poller that
 * finishes tasks completed without an accel round trip. */
struct malloc_channel {
	struct spdk_io_channel		*accel_channel;
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, malloc_task)	completed_tasks;
};
35 
36 static int
37 malloc_verify_pi(struct spdk_bdev_io *bdev_io)
38 {
39 	struct spdk_bdev *bdev = bdev_io->bdev;
40 	struct spdk_dif_ctx dif_ctx;
41 	struct spdk_dif_error err_blk;
42 	int rc;
43 
44 	rc = spdk_dif_ctx_init(&dif_ctx,
45 			       bdev->blocklen,
46 			       bdev->md_len,
47 			       bdev->md_interleave,
48 			       bdev->dif_is_head_of_md,
49 			       bdev->dif_type,
50 			       bdev->dif_check_flags,
51 			       bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
52 			       0xFFFF, 0, 0, 0);
53 	if (rc != 0) {
54 		SPDK_ERRLOG("Failed to initialize DIF/DIX context\n");
55 		return rc;
56 	}
57 
58 	if (spdk_bdev_is_md_interleaved(bdev)) {
59 		rc = spdk_dif_verify(bdev_io->u.bdev.iovs,
60 				     bdev_io->u.bdev.iovcnt,
61 				     bdev_io->u.bdev.num_blocks,
62 				     &dif_ctx,
63 				     &err_blk);
64 	} else {
65 		struct iovec md_iov = {
66 			.iov_base	= bdev_io->u.bdev.md_buf,
67 			.iov_len	= bdev_io->u.bdev.num_blocks * bdev->md_len,
68 		};
69 
70 		rc = spdk_dix_verify(bdev_io->u.bdev.iovs,
71 				     bdev_io->u.bdev.iovcnt,
72 				     &md_iov,
73 				     bdev_io->u.bdev.num_blocks,
74 				     &dif_ctx,
75 				     &err_blk);
76 	}
77 
78 	if (rc != 0) {
79 		SPDK_ERRLOG("DIF/DIX verify failed: lba %" PRIu64 ", num_blocks %" PRIu64 ", "
80 			    "err_type %u, expected %u, actual %u, err_offset %u\n",
81 			    bdev_io->u.bdev.offset_blocks,
82 			    bdev_io->u.bdev.num_blocks,
83 			    err_blk.err_type,
84 			    err_blk.expected,
85 			    err_blk.actual,
86 			    err_blk.err_offset);
87 	}
88 
89 	return rc;
90 }
91 
92 static void
93 malloc_done(void *ref, int status)
94 {
95 	struct malloc_task *task = (struct malloc_task *)ref;
96 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
97 	int rc;
98 
99 	if (status != 0) {
100 		if (status == -ENOMEM) {
101 			task->status = SPDK_BDEV_IO_STATUS_NOMEM;
102 		} else {
103 			task->status = SPDK_BDEV_IO_STATUS_FAILED;
104 		}
105 	}
106 
107 	if (--task->num_outstanding != 0) {
108 		return;
109 	}
110 
111 	if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
112 	    bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
113 	    task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
114 		rc = malloc_verify_pi(bdev_io);
115 		if (rc != 0) {
116 			task->status = SPDK_BDEV_IO_STATUS_FAILED;
117 		}
118 	}
119 
120 	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
121 }
122 
/* Complete a task that did not go through accel: record the status and queue
 * it for the channel's completion poller to finish in poller context. */
static void
malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
		     enum spdk_bdev_io_status status)
{
	task->status = status;
	TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
}
130 
/* All currently registered malloc disks. Also used as the io_device key. */
static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);

/* Counter for auto-generated "MallocN" names; reset on each module init. */
int malloc_disk_count = 0;

static int bdev_malloc_initialize(void);
static void bdev_malloc_deinitialize(void);
137 
/* Per-I/O driver context size the bdev layer must reserve for this module. */
static int
bdev_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_task);
}
143 
/* Module descriptor registered with the bdev layer. */
static struct spdk_bdev_module malloc_if = {
	.name = "malloc",
	.module_init = bdev_malloc_initialize,
	.module_fini = bdev_malloc_deinitialize,
	.get_ctx_size = bdev_malloc_get_ctx_size,

};

SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)
153 
/* Release everything owned by a malloc disk. Safe to call with NULL. */
static void
malloc_disk_free(struct malloc_disk *malloc_disk)
{
	if (!malloc_disk) {
		return;
	}

	free(malloc_disk->disk.name);
	spdk_free(malloc_disk->malloc_buf);
	spdk_free(malloc_disk->malloc_md_buf);
	free(malloc_disk);
}
166 
167 static int
168 bdev_malloc_destruct(void *ctx)
169 {
170 	struct malloc_disk *malloc_disk = ctx;
171 
172 	TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
173 	malloc_disk_free(malloc_disk);
174 	return 0;
175 }
176 
/*
 * Check whether an iovec array can hold an nbytes-sized transfer.
 * Returns 0 when the buffers cover the transfer (the final element may be
 * larger than what remains), and non-zero when they are too small.
 */
static int
bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
	size_t remaining = nbytes;
	int idx;

	for (idx = 0; idx < iovcnt; idx++) {
		if (remaining < iovs[idx].iov_len) {
			/* This element alone covers everything that is left. */
			return 0;
		}

		remaining -= iovs[idx].iov_len;
	}

	return remaining != 0;
}
192 
193 static void
194 bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
195 		  struct malloc_task *task,
196 		  struct iovec *iov, int iovcnt, size_t len, uint64_t offset,
197 		  void *md_buf, size_t md_len, uint64_t md_offset)
198 {
199 	int64_t res = 0;
200 	void *src;
201 	void *md_src;
202 	int i;
203 
204 	if (bdev_malloc_check_iov_len(iov, iovcnt, len)) {
205 		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
206 				      SPDK_BDEV_IO_STATUS_FAILED);
207 		return;
208 	}
209 
210 	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
211 	task->num_outstanding = 0;
212 
213 	SPDK_DEBUGLOG(bdev_malloc, "read %zu bytes from offset %#" PRIx64 ", iovcnt=%d\n",
214 		      len, offset, iovcnt);
215 
216 	src = mdisk->malloc_buf + offset;
217 
218 	for (i = 0; i < iovcnt; i++) {
219 		task->num_outstanding++;
220 		res = spdk_accel_submit_copy(ch, iov[i].iov_base,
221 					     src, iov[i].iov_len, 0, malloc_done, task);
222 
223 		if (res != 0) {
224 			malloc_done(task, res);
225 			break;
226 		}
227 
228 		src += iov[i].iov_len;
229 		len -= iov[i].iov_len;
230 	}
231 
232 	if (md_buf == NULL) {
233 		return;
234 	}
235 
236 	SPDK_DEBUGLOG(bdev_malloc, "read metadata %zu bytes from offset%#" PRIx64 "\n",
237 		      md_len, md_offset);
238 
239 	md_src = mdisk->malloc_md_buf + md_offset;
240 
241 	task->num_outstanding++;
242 	res = spdk_accel_submit_copy(ch, md_buf, md_src, md_len, 0, malloc_done, task);
243 
244 	if (res != 0) {
245 		malloc_done(task, res);
246 	}
247 }
248 
249 static void
250 bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
251 		   struct malloc_task *task,
252 		   struct iovec *iov, int iovcnt, size_t len, uint64_t offset,
253 		   void *md_buf, size_t md_len, uint64_t md_offset)
254 {
255 
256 	int64_t res = 0;
257 	void *dst;
258 	void *md_dst;
259 	int i;
260 
261 	if (bdev_malloc_check_iov_len(iov, iovcnt, len)) {
262 		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
263 				      SPDK_BDEV_IO_STATUS_FAILED);
264 		return;
265 	}
266 
267 	SPDK_DEBUGLOG(bdev_malloc, "wrote %zu bytes to offset %#" PRIx64 ", iovcnt=%d\n",
268 		      len, offset, iovcnt);
269 
270 	dst = mdisk->malloc_buf + offset;
271 
272 	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
273 	task->num_outstanding = 0;
274 
275 	for (i = 0; i < iovcnt; i++) {
276 		task->num_outstanding++;
277 		res = spdk_accel_submit_copy(ch, dst, iov[i].iov_base,
278 					     iov[i].iov_len, 0, malloc_done, task);
279 
280 		if (res != 0) {
281 			malloc_done(task, res);
282 			break;
283 		}
284 
285 		dst += iov[i].iov_len;
286 	}
287 
288 	if (md_buf == NULL) {
289 		return;
290 	}
291 	SPDK_DEBUGLOG(bdev_malloc, "wrote metadata %zu bytes to offset %#" PRIx64 "\n",
292 		      md_len, md_offset);
293 
294 	md_dst = mdisk->malloc_md_buf + md_offset;
295 
296 	task->num_outstanding++;
297 	res = spdk_accel_submit_copy(ch, md_dst, md_buf, md_len, 0, malloc_done, task);
298 
299 	if (res != 0) {
300 		malloc_done(task, res);
301 	}
302 
303 }
304 
/*
 * Zero `byte_count` bytes of the data buffer starting at `offset` via an
 * accel fill with 0; serves both UNMAP and WRITE_ZEROES.
 * Returns the accel submission result (0 on success); completion is
 * delivered through malloc_done().
 */
static int
bdev_malloc_unmap(struct malloc_disk *mdisk,
		  struct spdk_io_channel *ch,
		  struct malloc_task *task,
		  uint64_t offset,
		  uint64_t byte_count)
{
	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
				      byte_count, 0, malloc_done, task);
}
318 
319 static void
320 bdev_malloc_copy(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
321 		 struct malloc_task *task,
322 		 uint64_t dst_offset, uint64_t src_offset, size_t len)
323 {
324 	int64_t res = 0;
325 	void *dst = mdisk->malloc_buf + dst_offset;
326 	void *src = mdisk->malloc_buf + src_offset;
327 
328 	SPDK_DEBUGLOG(bdev_malloc, "Copy %zu bytes from offset %#" PRIx64 " to offset %#" PRIx64 "\n",
329 		      len, src_offset, dst_offset);
330 
331 	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
332 	task->num_outstanding = 1;
333 
334 	res = spdk_accel_submit_copy(ch, dst, src, len, 0, malloc_done, task);
335 	if (res != 0) {
336 		malloc_done(task, res);
337 	}
338 }
339 
340 static int
341 _bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
342 {
343 	uint32_t block_size = bdev_io->bdev->blocklen;
344 	uint32_t md_size = bdev_io->bdev->md_len;
345 	int rc;
346 
347 	switch (bdev_io->type) {
348 	case SPDK_BDEV_IO_TYPE_READ:
349 		if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
350 			assert(bdev_io->u.bdev.iovcnt == 1);
351 			bdev_io->u.bdev.iovs[0].iov_base =
352 				((struct malloc_disk *)bdev_io->bdev->ctxt)->malloc_buf +
353 				bdev_io->u.bdev.offset_blocks * block_size;
354 			bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
355 			malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
356 					     SPDK_BDEV_IO_STATUS_SUCCESS);
357 			return 0;
358 		}
359 
360 		bdev_malloc_readv((struct malloc_disk *)bdev_io->bdev->ctxt,
361 				  mch->accel_channel,
362 				  (struct malloc_task *)bdev_io->driver_ctx,
363 				  bdev_io->u.bdev.iovs,
364 				  bdev_io->u.bdev.iovcnt,
365 				  bdev_io->u.bdev.num_blocks * block_size,
366 				  bdev_io->u.bdev.offset_blocks * block_size,
367 				  bdev_io->u.bdev.md_buf,
368 				  bdev_io->u.bdev.num_blocks * md_size,
369 				  bdev_io->u.bdev.offset_blocks * md_size);
370 		return 0;
371 
372 	case SPDK_BDEV_IO_TYPE_WRITE:
373 		if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE) {
374 			rc = malloc_verify_pi(bdev_io);
375 			if (rc != 0) {
376 				malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
377 						     SPDK_BDEV_IO_STATUS_FAILED);
378 				return 0;
379 			}
380 		}
381 
382 		bdev_malloc_writev((struct malloc_disk *)bdev_io->bdev->ctxt,
383 				   mch->accel_channel,
384 				   (struct malloc_task *)bdev_io->driver_ctx,
385 				   bdev_io->u.bdev.iovs,
386 				   bdev_io->u.bdev.iovcnt,
387 				   bdev_io->u.bdev.num_blocks * block_size,
388 				   bdev_io->u.bdev.offset_blocks * block_size,
389 				   bdev_io->u.bdev.md_buf,
390 				   bdev_io->u.bdev.num_blocks * md_size,
391 				   bdev_io->u.bdev.offset_blocks * md_size);
392 		return 0;
393 
394 	case SPDK_BDEV_IO_TYPE_RESET:
395 		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
396 				     SPDK_BDEV_IO_STATUS_SUCCESS);
397 		return 0;
398 
399 	case SPDK_BDEV_IO_TYPE_FLUSH:
400 		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
401 				     SPDK_BDEV_IO_STATUS_SUCCESS);
402 		return 0;
403 
404 	case SPDK_BDEV_IO_TYPE_UNMAP:
405 		return bdev_malloc_unmap((struct malloc_disk *)bdev_io->bdev->ctxt,
406 					 mch->accel_channel,
407 					 (struct malloc_task *)bdev_io->driver_ctx,
408 					 bdev_io->u.bdev.offset_blocks * block_size,
409 					 bdev_io->u.bdev.num_blocks * block_size);
410 
411 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
412 		/* bdev_malloc_unmap is implemented with a call to mem_cpy_fill which zeroes out all of the requested bytes. */
413 		return bdev_malloc_unmap((struct malloc_disk *)bdev_io->bdev->ctxt,
414 					 mch->accel_channel,
415 					 (struct malloc_task *)bdev_io->driver_ctx,
416 					 bdev_io->u.bdev.offset_blocks * block_size,
417 					 bdev_io->u.bdev.num_blocks * block_size);
418 
419 	case SPDK_BDEV_IO_TYPE_ZCOPY:
420 		if (bdev_io->u.bdev.zcopy.start) {
421 			void *buf;
422 			size_t len;
423 
424 			buf = ((struct malloc_disk *)bdev_io->bdev->ctxt)->malloc_buf +
425 			      bdev_io->u.bdev.offset_blocks * block_size;
426 			len = bdev_io->u.bdev.num_blocks * block_size;
427 			spdk_bdev_io_set_buf(bdev_io, buf, len);
428 
429 		}
430 		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
431 				     SPDK_BDEV_IO_STATUS_SUCCESS);
432 		return 0;
433 	case SPDK_BDEV_IO_TYPE_ABORT:
434 		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
435 				     SPDK_BDEV_IO_STATUS_FAILED);
436 		return 0;
437 	case SPDK_BDEV_IO_TYPE_COPY:
438 		bdev_malloc_copy((struct malloc_disk *)bdev_io->bdev->ctxt,
439 				 mch->accel_channel,
440 				 (struct malloc_task *)bdev_io->driver_ctx,
441 				 bdev_io->u.bdev.offset_blocks * block_size,
442 				 bdev_io->u.bdev.copy.src_offset_blocks * block_size,
443 				 bdev_io->u.bdev.num_blocks * block_size);
444 		return 0;
445 
446 	default:
447 		return -1;
448 	}
449 	return 0;
450 }
451 
452 static void
453 bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
454 {
455 	struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);
456 
457 	if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
458 		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
459 				     SPDK_BDEV_IO_STATUS_FAILED);
460 	}
461 }
462 
463 static bool
464 bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
465 {
466 	switch (io_type) {
467 	case SPDK_BDEV_IO_TYPE_READ:
468 	case SPDK_BDEV_IO_TYPE_WRITE:
469 	case SPDK_BDEV_IO_TYPE_FLUSH:
470 	case SPDK_BDEV_IO_TYPE_RESET:
471 	case SPDK_BDEV_IO_TYPE_UNMAP:
472 	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
473 	case SPDK_BDEV_IO_TYPE_ZCOPY:
474 	case SPDK_BDEV_IO_TYPE_ABORT:
475 	case SPDK_BDEV_IO_TYPE_COPY:
476 		return true;
477 
478 	default:
479 		return false;
480 	}
481 }
482 
/* All malloc disks share one io_device, keyed on the global disk list. */
static struct spdk_io_channel *
bdev_malloc_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_malloc_disks);
}
488 
/* Emit the bdev_malloc_create RPC call that would recreate this bdev.
 * NOTE(review): md_size/md_interleave/dif_type/dif_is_head_of_md are not
 * written here, so a config dump does not round-trip PI settings — confirm
 * whether that is intentional. */
static void
bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	char uuid_str[SPDK_UUID_STRING_LEN];

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_malloc_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", bdev->name);
	spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
	spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
	spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
	spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &bdev->uuid);
	spdk_json_write_named_string(w, "uuid", uuid_str);
	spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}
511 
/* Function table handed to the bdev layer for every malloc disk. */
static const struct spdk_bdev_fn_table malloc_fn_table = {
	.destruct		= bdev_malloc_destruct,
	.submit_request		= bdev_malloc_submit_request,
	.io_type_supported	= bdev_malloc_io_type_supported,
	.get_io_channel		= bdev_malloc_get_io_channel,
	.write_config_json	= bdev_malloc_write_json_config,
};
519 
520 static int
521 malloc_disk_setup_pi(struct malloc_disk *mdisk)
522 {
523 	struct spdk_bdev *bdev = &mdisk->disk;
524 	struct spdk_dif_ctx dif_ctx;
525 	struct iovec iov, md_iov;
526 	int rc;
527 
528 	rc = spdk_dif_ctx_init(&dif_ctx,
529 			       bdev->blocklen,
530 			       bdev->md_len,
531 			       bdev->md_interleave,
532 			       bdev->dif_is_head_of_md,
533 			       bdev->dif_type,
534 			       bdev->dif_check_flags,
535 			       0,	/* configure the whole buffers */
536 			       0, 0, 0, 0);
537 	if (rc != 0) {
538 		SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
539 		return rc;
540 	}
541 
542 	iov.iov_base = mdisk->malloc_buf;
543 	iov.iov_len = bdev->blockcnt * bdev->blocklen;
544 
545 	if (mdisk->disk.md_interleave) {
546 		rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
547 	} else {
548 		md_iov.iov_base = mdisk->malloc_md_buf;
549 		md_iov.iov_len = bdev->blockcnt * bdev->md_len;
550 
551 		rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
552 	}
553 
554 	if (rc != 0) {
555 		SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
556 	}
557 
558 	return rc;
559 }
560 
561 int
562 create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
563 {
564 	struct malloc_disk *mdisk;
565 	uint32_t block_size;
566 	int rc;
567 
568 	assert(opts != NULL);
569 
570 	if (opts->num_blocks == 0) {
571 		SPDK_ERRLOG("Disk num_blocks must be greater than 0");
572 		return -EINVAL;
573 	}
574 
575 	if (opts->block_size % 512) {
576 		SPDK_ERRLOG("Data block size must be 512 bytes aligned\n");
577 		return -EINVAL;
578 	}
579 
580 	if (opts->physical_block_size % 512) {
581 		SPDK_ERRLOG("Physical block must be 512 bytes aligned\n");
582 		return -EINVAL;
583 	}
584 
585 	switch (opts->md_size) {
586 	case 0:
587 	case 8:
588 	case 16:
589 	case 32:
590 	case 64:
591 	case 128:
592 		break;
593 	default:
594 		SPDK_ERRLOG("metadata size %u is not supported\n", opts->md_size);
595 		return -EINVAL;
596 	}
597 
598 	if (opts->md_interleave) {
599 		block_size = opts->block_size + opts->md_size;
600 	} else {
601 		block_size = opts->block_size;
602 	}
603 
604 	if (opts->dif_type < SPDK_DIF_DISABLE || opts->dif_type > SPDK_DIF_TYPE3) {
605 		SPDK_ERRLOG("DIF type is invalid\n");
606 		return -EINVAL;
607 	}
608 
609 	if (opts->dif_type != SPDK_DIF_DISABLE && opts->md_size == 0) {
610 		SPDK_ERRLOG("Metadata size should not be zero if DIF is enabled\n");
611 		return -EINVAL;
612 	}
613 
614 	mdisk = calloc(1, sizeof(*mdisk));
615 	if (!mdisk) {
616 		SPDK_ERRLOG("mdisk calloc() failed\n");
617 		return -ENOMEM;
618 	}
619 
620 	/*
621 	 * Allocate the large backend memory buffer from pinned memory.
622 	 *
623 	 * TODO: need to pass a hint so we know which socket to allocate
624 	 *  from on multi-socket systems.
625 	 */
626 	mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * block_size, 2 * 1024 * 1024, NULL,
627 					 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
628 	if (!mdisk->malloc_buf) {
629 		SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
630 		malloc_disk_free(mdisk);
631 		return -ENOMEM;
632 	}
633 
634 	if (!opts->md_interleave && opts->md_size != 0) {
635 		mdisk->malloc_md_buf = spdk_zmalloc(opts->num_blocks * opts->md_size, 2 * 1024 * 1024, NULL,
636 						    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
637 		if (!mdisk->malloc_md_buf) {
638 			SPDK_ERRLOG("malloc_md_buf spdk_zmalloc() failed\n");
639 			malloc_disk_free(mdisk);
640 			return -ENOMEM;
641 		}
642 	}
643 
644 	if (opts->name) {
645 		mdisk->disk.name = strdup(opts->name);
646 	} else {
647 		/* Auto-generate a name */
648 		mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
649 		malloc_disk_count++;
650 	}
651 	if (!mdisk->disk.name) {
652 		malloc_disk_free(mdisk);
653 		return -ENOMEM;
654 	}
655 	mdisk->disk.product_name = "Malloc disk";
656 
657 	mdisk->disk.write_cache = 1;
658 	mdisk->disk.blocklen = block_size;
659 	mdisk->disk.phys_blocklen = opts->physical_block_size;
660 	mdisk->disk.blockcnt = opts->num_blocks;
661 	mdisk->disk.md_len = opts->md_size;
662 	mdisk->disk.md_interleave = opts->md_interleave;
663 	mdisk->disk.dif_type = opts->dif_type;
664 	mdisk->disk.dif_is_head_of_md = opts->dif_is_head_of_md;
665 	/* Current block device layer API does not propagate
666 	 * any DIF related information from user. So, we can
667 	 * not generate or verify Application Tag.
668 	 */
669 	switch (opts->dif_type) {
670 	case SPDK_DIF_TYPE1:
671 	case SPDK_DIF_TYPE2:
672 		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
673 					      SPDK_DIF_FLAGS_REFTAG_CHECK;
674 		break;
675 	case SPDK_DIF_TYPE3:
676 		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK;
677 		break;
678 	case SPDK_DIF_DISABLE:
679 		break;
680 	}
681 
682 	if (opts->dif_type != SPDK_DIF_DISABLE) {
683 		rc = malloc_disk_setup_pi(mdisk);
684 		if (rc) {
685 			SPDK_ERRLOG("Failed to set up protection information.\n");
686 			malloc_disk_free(mdisk);
687 			return rc;
688 		}
689 	}
690 
691 	if (opts->optimal_io_boundary) {
692 		mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
693 		mdisk->disk.split_on_optimal_io_boundary = true;
694 	}
695 	if (!spdk_mem_all_zero(&opts->uuid, sizeof(opts->uuid))) {
696 		spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
697 	}
698 
699 	mdisk->disk.max_copy = 0;
700 	mdisk->disk.ctxt = mdisk;
701 	mdisk->disk.fn_table = &malloc_fn_table;
702 	mdisk->disk.module = &malloc_if;
703 
704 	rc = spdk_bdev_register(&mdisk->disk);
705 	if (rc) {
706 		malloc_disk_free(mdisk);
707 		return rc;
708 	}
709 
710 	*bdev = &(mdisk->disk);
711 
712 	TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);
713 
714 	return rc;
715 }
716 
717 void
718 delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
719 {
720 	int rc;
721 
722 	rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
723 	if (rc != 0) {
724 		cb_fn(cb_arg, rc);
725 	}
726 }
727 
728 static int
729 malloc_completion_poller(void *ctx)
730 {
731 	struct malloc_channel *ch = ctx;
732 	struct malloc_task *task;
733 	TAILQ_HEAD(, malloc_task) completed_tasks;
734 	uint32_t num_completions = 0;
735 
736 	TAILQ_INIT(&completed_tasks);
737 	TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);
738 
739 	while (!TAILQ_EMPTY(&completed_tasks)) {
740 		task = TAILQ_FIRST(&completed_tasks);
741 		TAILQ_REMOVE(&completed_tasks, task, tailq);
742 		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
743 		num_completions++;
744 	}
745 
746 	return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
747 }
748 
749 static int
750 malloc_create_channel_cb(void *io_device, void *ctx)
751 {
752 	struct malloc_channel *ch = ctx;
753 
754 	ch->accel_channel = spdk_accel_get_io_channel();
755 	if (!ch->accel_channel) {
756 		SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
757 		return -ENOMEM;
758 	}
759 
760 	ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
761 	if (!ch->completion_poller) {
762 		SPDK_ERRLOG("Failed to register malloc completion poller\n");
763 		spdk_put_io_channel(ch->accel_channel);
764 		return -ENOMEM;
765 	}
766 
767 	TAILQ_INIT(&ch->completed_tasks);
768 
769 	return 0;
770 }
771 
772 static void
773 malloc_destroy_channel_cb(void *io_device, void *ctx)
774 {
775 	struct malloc_channel *ch = ctx;
776 
777 	assert(TAILQ_EMPTY(&ch->completed_tasks));
778 
779 	spdk_put_io_channel(ch->accel_channel);
780 	spdk_poller_unregister(&ch->completion_poller);
781 }
782 
/* Module init: register the shared io_device for all malloc disks.
 * Always returns 0. */
static int
bdev_malloc_initialize(void)
{
	/* This needs to be reset for each reinitialization of submodules.
	 * Otherwise after enough devices or reinitializations the value gets too high.
	 * TODO: Make malloc bdev name mandatory and remove this counter. */
	malloc_disk_count = 0;

	spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
				malloc_destroy_channel_cb, sizeof(struct malloc_channel),
				"bdev_malloc");

	return 0;
}
797 
/* Module teardown: unregister the shared io_device. */
static void
bdev_malloc_deinitialize(void)
{
	spdk_io_device_unregister(&g_malloc_disks, NULL);
}

SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)
805