/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "bdev_malloc.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/accel.h"
#include "spdk/dma.h"
#include "spdk/likely.h"
#include "spdk/string.h"

#include "spdk/log.h"

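/* Per-disk context: the exported bdev plus the pinned data buffer and the
 * optional separate metadata buffer that back it.
 */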
struct malloc_disk {
	struct spdk_bdev		disk;
	void				*malloc_buf;
	void				*malloc_md_buf;
	TAILQ_ENTRY(malloc_disk)	link;
};

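/* Per-IO context. num_outstanding counts the accel operations still in flight
 * for this IO; status aggregates their results, with -ENOMEM mapped to
 * SPDK_BDEV_IO_STATUS_NOMEM so the bdev layer can retry the IO.
 */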
struct malloc_task {
	struct iovec			iov;
	int				num_outstanding;
	enum spdk_bdev_io_status	status;
	TAILQ_ENTRY(malloc_task)	tailq;
};

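/* Per-thread channel: an accel framework channel for data movement plus a
 * poller that completes tasks finished inline during submission.
 */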
struct malloc_channel {
	struct spdk_io_channel		*accel_channel;
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, malloc_task)	completed_tasks;
};

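/* Verify the DIF/DIX protection information carried by a request against the
 * bdev's configured DIF type and check flags.
 */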
static int
malloc_verify_pi(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk;
	int rc;

	assert(bdev_io->u.bdev.memory_domain == NULL);
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen,
			       bdev->md_len,
			       bdev->md_interleave,
			       bdev->dif_is_head_of_md,
			       bdev->dif_type,
			       bdev->dif_check_flags,
			       bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
			       0xFFFF, 0, 0, 0);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize DIF/DIX context\n");
		return rc;
	}

	if (spdk_bdev_is_md_interleaved(bdev)) {
		rc = spdk_dif_verify(bdev_io->u.bdev.iovs,
				     bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.num_blocks,
				     &dif_ctx,
				     &err_blk);
	} else {
		struct iovec md_iov = {
			.iov_base	= bdev_io->u.bdev.md_buf,
			.iov_len	= bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_verify(bdev_io->u.bdev.iovs,
				     bdev_io->u.bdev.iovcnt,
				     &md_iov,
				     bdev_io->u.bdev.num_blocks,
				     &dif_ctx,
				     &err_blk);
	}

	if (rc != 0) {
		SPDK_ERRLOG("DIF/DIX verify failed: lba %" PRIu64 ", num_blocks %" PRIu64 ", "
			    "err_type %u, expected %u, actual %u, err_offset %u\n",
			    bdev_io->u.bdev.offset_blocks,
			    bdev_io->u.bdev.num_blocks,
			    err_blk.err_type,
			    err_blk.expected,
			    err_blk.actual,
			    err_blk.err_offset);
	}

	return rc;
}

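/* Per-operation completion callback. The IO completes once all outstanding
 * accel operations have finished; reads additionally verify PI here when DIF
 * is enabled.
 */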
static void
malloc_done(void *ref, int status)
{
	struct malloc_task *task = (struct malloc_task *)ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
	int rc;

	if (status != 0) {
		if (status == -ENOMEM) {
			if (task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
				task->status = SPDK_BDEV_IO_STATUS_NOMEM;
			}
		} else {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	if (--task->num_outstanding != 0) {
		return;
	}

	if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
	    bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
	    task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		rc = malloc_verify_pi(bdev_io);
		if (rc != 0) {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	assert(!bdev_io->u.bdev.accel_sequence || task->status == SPDK_BDEV_IO_STATUS_NOMEM);
	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
}

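/* Complete a task without going through the accel framework: queue it on the
 * channel and let the completion poller finish it, so the IO does not
 * complete in the caller's submission context.
 */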
static void
malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
		     enum spdk_bdev_io_status status)
{
	task->status = status;
	TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
}

static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);

int malloc_disk_count = 0;

static int bdev_malloc_initialize(void);
static void bdev_malloc_deinitialize(void);

static int
bdev_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_task);
}

static struct spdk_bdev_module malloc_if = {
	.name = "malloc",
	.module_init = bdev_malloc_initialize,
	.module_fini = bdev_malloc_deinitialize,
	.get_ctx_size = bdev_malloc_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)

static void
malloc_disk_free(struct malloc_disk *malloc_disk)
{
	if (!malloc_disk) {
		return;
	}

	free(malloc_disk->disk.name);
	spdk_free(malloc_disk->malloc_buf);
	spdk_free(malloc_disk->malloc_md_buf);
	free(malloc_disk);
}

static int
bdev_malloc_destruct(void *ctx)
{
	struct malloc_disk *malloc_disk = ctx;

	TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
	malloc_disk_free(malloc_disk);
	return 0;
}

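/* Return 0 if the iovecs provide at least nbytes of buffer space, non-zero if
 * they are collectively too short for the request.
 */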
static int
bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (nbytes < iovs[i].iov_len) {
			return 0;
		}

		nbytes -= iovs[i].iov_len;
	}

	return nbytes != 0;
}

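/* Fail an IO whose accel sequence could not be built. ENOMEM leaves the
 * sequence intact so the bdev layer can retry the IO.
 */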
static void
malloc_sequence_fail(struct malloc_task *task, int status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	/* For ENOMEM, the IO will be retried by the bdev layer, so we don't abort the sequence */
	if (status != -ENOMEM) {
		spdk_accel_sequence_abort(bdev_io->u.bdev.accel_sequence);
		bdev_io->u.bdev.accel_sequence = NULL;
	}

	malloc_done(task, status);
}

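/* Completion callback for a finished accel sequence. */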
static void
malloc_sequence_done(void *ctx, int status)
{
	struct malloc_task *task = ctx;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	bdev_io->u.bdev.accel_sequence = NULL;
	/* Prevent bdev layer from retrying the request if the sequence failed with ENOMEM */
	malloc_done(task, status != -ENOMEM ? status : -EFAULT);
}

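/* Serve a read by appending a copy from the backing buffer to the IO's accel
 * sequence. Separate metadata, if present, is copied with a standalone accel
 * operation tracked by the same task.
 */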
static void
bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		  struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset, md_offset;
	int res = 0;
	size_t md_len;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				      SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "read %" PRIu64 " bytes from offset %#" PRIx64 ", iovcnt=%d\n",
		      len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch,
				     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.memory_domain,
				     bdev_io->u.bdev.memory_domain_ctx,
				     &task->iov, 1, NULL, NULL, 0, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_reverse(bdev_io->u.bdev.accel_sequence);
	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	md_len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
	md_offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;

	SPDK_DEBUGLOG(bdev_malloc, "read metadata %zu bytes from offset %#" PRIx64 "\n",
		      md_len, md_offset);

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, bdev_io->u.bdev.md_buf, mdisk->malloc_md_buf + md_offset,
				     md_len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

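/* Serve a write by appending a copy into the backing buffer to the IO's accel
 * sequence, mirroring the read path with source and destination swapped.
 */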
static void
bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		   struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset, md_offset;
	int res = 0;
	size_t md_len;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				      SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "write %" PRIu64 " bytes to offset %#" PRIx64 ", iovcnt=%d\n",
		      len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch, &task->iov, 1, NULL, NULL,
				     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.memory_domain,
				     bdev_io->u.bdev.memory_domain_ctx, 0, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	md_len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
	md_offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;

	SPDK_DEBUGLOG(bdev_malloc, "write metadata %zu bytes to offset %#" PRIx64 "\n",
		      md_len, md_offset);

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, mdisk->malloc_md_buf + md_offset, bdev_io->u.bdev.md_buf,
				     md_len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

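/* Unmap is implemented as a zero-fill of the byte range, submitted through
 * the accel framework.
 */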
static int
bdev_malloc_unmap(struct malloc_disk *mdisk,
		  struct spdk_io_channel *ch,
		  struct malloc_task *task,
		  uint64_t offset,
		  uint64_t byte_count)
{
	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
				      byte_count, 0, malloc_done, task);
}

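/* Copy a byte range within the same disk using a single accel copy. */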
static void
bdev_malloc_copy(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		 struct malloc_task *task,
		 uint64_t dst_offset, uint64_t src_offset, size_t len)
{
	int res = 0;
	void *dst = mdisk->malloc_buf + dst_offset;
	void *src = mdisk->malloc_buf + src_offset;

	SPDK_DEBUGLOG(bdev_malloc, "Copy %zu bytes from offset %#" PRIx64 " to offset %#" PRIx64 "\n",
		      len, src_offset, dst_offset);

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	res = spdk_accel_submit_copy(ch, dst, src, len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

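/* Dispatch a bdev IO to the matching malloc implementation. Returns non-zero
 * only for unsupported IO types; all handled types report their status
 * through task completion instead.
 */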
static int
_bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_task *task = (struct malloc_task *)bdev_io->driver_ctx;
	struct malloc_disk *disk = bdev_io->bdev->ctxt;
	uint32_t block_size = bdev_io->bdev->blocklen;
	int rc;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
			assert(bdev_io->u.bdev.iovcnt == 1);
			assert(bdev_io->u.bdev.memory_domain == NULL);
			bdev_io->u.bdev.iovs[0].iov_base =
				disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
			malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
			return 0;
		}

		bdev_malloc_readv(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE) {
			rc = malloc_verify_pi(bdev_io);
			if (rc != 0) {
				malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
				return 0;
			}
		}

		bdev_malloc_writev(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_RESET:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_FLUSH:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* bdev_malloc_unmap() fills the range with zeroes via
		 * spdk_accel_submit_fill(), which satisfies both unmap and
		 * write-zeroes. */
		return bdev_malloc_unmap(disk, mch->accel_channel, task,
					 bdev_io->u.bdev.offset_blocks * block_size,
					 bdev_io->u.bdev.num_blocks * block_size);

	case SPDK_BDEV_IO_TYPE_ZCOPY:
		if (bdev_io->u.bdev.zcopy.start) {
			void *buf;
			size_t len;

			buf = disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			len = bdev_io->u.bdev.num_blocks * block_size;
			spdk_bdev_io_set_buf(bdev_io, buf, len);
		}
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;
	case SPDK_BDEV_IO_TYPE_ABORT:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
		return 0;
	case SPDK_BDEV_IO_TYPE_COPY:
		bdev_malloc_copy(disk, mch->accel_channel, task,
				 bdev_io->u.bdev.offset_blocks * block_size,
				 bdev_io->u.bdev.copy.src_offset_blocks * block_size,
				 bdev_io->u.bdev.num_blocks * block_size);
		return 0;

	default:
		return -1;
	}
}

static void
bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);

	if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
				     SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	case SPDK_BDEV_IO_TYPE_ZCOPY:
	case SPDK_BDEV_IO_TYPE_ABORT:
	case SPDK_BDEV_IO_TYPE_COPY:
		return true;

	default:
		return false;
	}
}

static struct spdk_io_channel *
bdev_malloc_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_malloc_disks);
}

static void
bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	char uuid_str[SPDK_UUID_STRING_LEN];

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_malloc_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", bdev->name);
	spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
	spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
	spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
	spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &bdev->uuid);
	spdk_json_write_named_string(w, "uuid", uuid_str);
	spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}

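/* PI verification accesses the data buffers directly (see the assert in
 * malloc_verify_pi()), so no memory domains are exposed when DIF is enabled;
 * otherwise every registered domain is reported, since data always moves
 * through the accel framework.
 */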
static int
bdev_malloc_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct malloc_disk *malloc_disk = ctx;
	struct spdk_memory_domain *domain;
	int num_domains = 0;

	if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
		return 0;
	}

	/* Report support for every memory domain */
	for (domain = spdk_memory_domain_get_first(NULL); domain != NULL;
	     domain = spdk_memory_domain_get_next(domain, NULL)) {
		if (domains != NULL && num_domains < array_size) {
			domains[num_domains] = domain;
		}
		num_domains++;
	}

	return num_domains;
}

static bool
bdev_malloc_accel_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
{
	struct malloc_disk *malloc_disk = ctx;

	if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
		return false;
	}

	switch (type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
		return true;
	default:
		return false;
	}
}

static const struct spdk_bdev_fn_table malloc_fn_table = {
	.destruct			= bdev_malloc_destruct,
	.submit_request			= bdev_malloc_submit_request,
	.io_type_supported		= bdev_malloc_io_type_supported,
	.get_io_channel			= bdev_malloc_get_io_channel,
	.write_config_json		= bdev_malloc_write_json_config,
	.get_memory_domains		= bdev_malloc_get_memory_domains,
	.accel_sequence_supported	= bdev_malloc_accel_sequence_supported,
};

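/* Generate initial protection information for the entire disk so that reads
 * of never-written blocks still carry valid PI.
 */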
static int
malloc_disk_setup_pi(struct malloc_disk *mdisk)
{
	struct spdk_bdev *bdev = &mdisk->disk;
	struct spdk_dif_ctx dif_ctx;
	struct iovec iov, md_iov;
	int rc;

	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen,
			       bdev->md_len,
			       bdev->md_interleave,
			       bdev->dif_is_head_of_md,
			       bdev->dif_type,
			       bdev->dif_check_flags,
			       0,	/* configure the whole buffers */
			       0, 0, 0, 0);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
		return rc;
	}

	iov.iov_base = mdisk->malloc_buf;
	iov.iov_len = bdev->blockcnt * bdev->blocklen;

	if (mdisk->disk.md_interleave) {
		rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
	} else {
		md_iov.iov_base = mdisk->malloc_md_buf;
		md_iov.iov_len = bdev->blockcnt * bdev->md_len;

		rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
	}

	return rc;
}

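/* Create and register a malloc disk from the given options. Callers normally
 * reach this through the bdev_malloc_create RPC (the parameters serialized in
 * bdev_malloc_write_json_config() above), e.g. something like the following
 * illustrative invocation, which creates a 64 MiB bdev named Malloc0 with
 * 512-byte blocks:
 *
 *   scripts/rpc.py bdev_malloc_create -b Malloc0 64 512
 */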
int
create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
{
	struct malloc_disk *mdisk;
	uint32_t block_size;
	int rc;

	assert(opts != NULL);

	if (opts->num_blocks == 0) {
		SPDK_ERRLOG("Disk num_blocks must be greater than 0\n");
		return -EINVAL;
	}

	if (opts->block_size % 512) {
		SPDK_ERRLOG("Data block size must be a multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (opts->physical_block_size % 512) {
		SPDK_ERRLOG("Physical block size must be a multiple of 512 bytes\n");
		return -EINVAL;
	}

	switch (opts->md_size) {
	case 0:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		SPDK_ERRLOG("metadata size %u is not supported\n", opts->md_size);
		return -EINVAL;
	}

	if (opts->md_interleave) {
		block_size = opts->block_size + opts->md_size;
	} else {
		block_size = opts->block_size;
	}

	if (opts->dif_type < SPDK_DIF_DISABLE || opts->dif_type > SPDK_DIF_TYPE3) {
		SPDK_ERRLOG("DIF type is invalid\n");
		return -EINVAL;
	}

	if (opts->dif_type != SPDK_DIF_DISABLE && opts->md_size == 0) {
		SPDK_ERRLOG("Metadata size should not be zero if DIF is enabled\n");
		return -EINVAL;
	}

	mdisk = calloc(1, sizeof(*mdisk));
	if (!mdisk) {
		SPDK_ERRLOG("mdisk calloc() failed\n");
		return -ENOMEM;
	}

	/*
	 * Allocate the large backend memory buffer from pinned memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 *  from on multi-socket systems.
	 */
	mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * block_size, 2 * 1024 * 1024, NULL,
					 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}

	if (!opts->md_interleave && opts->md_size != 0) {
		mdisk->malloc_md_buf = spdk_zmalloc(opts->num_blocks * opts->md_size, 2 * 1024 * 1024, NULL,
						    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!mdisk->malloc_md_buf) {
			SPDK_ERRLOG("malloc_md_buf spdk_zmalloc() failed\n");
			malloc_disk_free(mdisk);
			return -ENOMEM;
		}
	}

	if (opts->name) {
		mdisk->disk.name = strdup(opts->name);
	} else {
		/* Auto-generate a name */
		mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
		malloc_disk_count++;
	}
	if (!mdisk->disk.name) {
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}
	mdisk->disk.product_name = "Malloc disk";

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.phys_blocklen = opts->physical_block_size;
	mdisk->disk.blockcnt = opts->num_blocks;
	mdisk->disk.md_len = opts->md_size;
	mdisk->disk.md_interleave = opts->md_interleave;
	mdisk->disk.dif_type = opts->dif_type;
	mdisk->disk.dif_is_head_of_md = opts->dif_is_head_of_md;
	/* Current block device layer API does not propagate
	 * any DIF related information from user. So, we can
	 * not generate or verify Application Tag.
	 */
	switch (opts->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
					      SPDK_DIF_FLAGS_REFTAG_CHECK;
		break;
	case SPDK_DIF_TYPE3:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK;
		break;
	case SPDK_DIF_DISABLE:
		break;
	}

	if (opts->dif_type != SPDK_DIF_DISABLE) {
		rc = malloc_disk_setup_pi(mdisk);
		if (rc) {
			SPDK_ERRLOG("Failed to set up protection information.\n");
			malloc_disk_free(mdisk);
			return rc;
		}
	}

	if (opts->optimal_io_boundary) {
		mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
		mdisk->disk.split_on_optimal_io_boundary = true;
	}
	if (!spdk_mem_all_zero(&opts->uuid, sizeof(opts->uuid))) {
		spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
	}

	mdisk->disk.max_copy = 0;
	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;
	mdisk->disk.module = &malloc_if;

	rc = spdk_bdev_register(&mdisk->disk);
	if (rc) {
		malloc_disk_free(mdisk);
		return rc;
	}

	*bdev = &(mdisk->disk);

	TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);

	return rc;
}

void
delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
{
	int rc;

	rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
	if (rc != 0) {
		cb_fn(cb_arg, rc);
	}
}

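/* Drain tasks that were completed outside the accel framework and finish
 * their bdev IOs in poller context.
 */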
static int
malloc_completion_poller(void *ctx)
{
	struct malloc_channel *ch = ctx;
	struct malloc_task *task;
	TAILQ_HEAD(, malloc_task) completed_tasks;
	uint32_t num_completions = 0;

	TAILQ_INIT(&completed_tasks);
	TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);

	while (!TAILQ_EMPTY(&completed_tasks)) {
		task = TAILQ_FIRST(&completed_tasks);
		TAILQ_REMOVE(&completed_tasks, task, tailq);
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
		num_completions++;
	}

	return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
malloc_create_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	ch->accel_channel = spdk_accel_get_io_channel();
	if (!ch->accel_channel) {
		SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
		return -ENOMEM;
	}

	ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
	if (!ch->completion_poller) {
		SPDK_ERRLOG("Failed to register malloc completion poller\n");
		spdk_put_io_channel(ch->accel_channel);
		return -ENOMEM;
	}

	TAILQ_INIT(&ch->completed_tasks);

	return 0;
}

static void
malloc_destroy_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	assert(TAILQ_EMPTY(&ch->completed_tasks));

	spdk_put_io_channel(ch->accel_channel);
	spdk_poller_unregister(&ch->completion_poller);
}

static int
bdev_malloc_initialize(void)
{
	/* This needs to be reset for each reinitialization of submodules.
	 * Otherwise after enough devices or reinitializations the value gets too high.
	 * TODO: Make malloc bdev name mandatory and remove this counter. */
	malloc_disk_count = 0;

	spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
				malloc_destroy_channel_cb, sizeof(struct malloc_channel),
				"bdev_malloc");

	return 0;
}

static void
bdev_malloc_deinitialize(void)
{
	spdk_io_device_unregister(&g_malloc_disks, NULL);
}

SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)