/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "bdev_malloc.h"
#include "spdk/endian.h"
#include "spdk/env.h"
#include "spdk/accel.h"
#include "spdk/dma.h"
#include "spdk/likely.h"
#include "spdk/string.h"

#include "spdk/log.h"

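/* Per-disk state: the exported bdev plus the pinned data and (optional)
 * separate metadata buffers that back it.
 */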
struct malloc_disk {
	struct spdk_bdev		disk;
	void				*malloc_buf;
	void				*malloc_md_buf;
	TAILQ_ENTRY(malloc_disk)	link;
};

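/* Per-IO context, carved out of the spdk_bdev_io's driver_ctx.
 * num_outstanding counts accel operations in flight for this IO (data and,
 * optionally, metadata); the IO completes when it drops to zero.
 */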
struct malloc_task {
	struct iovec			iov;
	int				num_outstanding;
	enum spdk_bdev_io_status	status;
	TAILQ_ENTRY(malloc_task)	tailq;
};

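/* Per-channel state: an accel framework channel for copy/fill offload, and a
 * poller that drains tasks completed inline so their bdev_io completions run
 * outside the submission path.
 */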
struct malloc_channel {
	struct spdk_io_channel		*accel_channel;
	struct spdk_poller		*completion_poller;
	TAILQ_HEAD(, malloc_task)	completed_tasks;
};

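/* Verify DIF/DIX protection information across the whole IO. The reference
 * tag is seeded from the starting LBA (truncated to 32 bits); the Application
 * Tag is never checked (see the note in create_malloc_disk()).
 */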
static int
malloc_verify_pi(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	assert(bdev_io->u.bdev.memory_domain == NULL);
	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen,
			       bdev->md_len,
			       bdev->md_interleave,
			       bdev->dif_is_head_of_md,
			       bdev->dif_type,
			       bdev->dif_check_flags,
			       bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
			       0xFFFF, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize DIF/DIX context\n");
		return rc;
	}

	if (spdk_bdev_is_md_interleaved(bdev)) {
		rc = spdk_dif_verify(bdev_io->u.bdev.iovs,
				     bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.num_blocks,
				     &dif_ctx,
				     &err_blk);
	} else {
		struct iovec md_iov = {
			.iov_base	= bdev_io->u.bdev.md_buf,
			.iov_len	= bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_verify(bdev_io->u.bdev.iovs,
				     bdev_io->u.bdev.iovcnt,
				     &md_iov,
				     bdev_io->u.bdev.num_blocks,
				     &dif_ctx,
				     &err_blk);
	}

	if (rc != 0) {
		SPDK_ERRLOG("DIF/DIX verify failed: lba %" PRIu64 ", num_blocks %" PRIu64 ", "
			    "err_type %u, expected %" PRIu64 ", actual %" PRIu64 ", err_offset %u\n",
			    bdev_io->u.bdev.offset_blocks,
			    bdev_io->u.bdev.num_blocks,
			    err_blk.err_type,
			    err_blk.expected,
			    err_blk.actual,
			    err_blk.err_offset);
	}

	return rc;
}

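/* Completion callback for accel operations. Each status is folded into the
 * task (ENOMEM only downgrades a success, so a real error is never masked by
 * a later retryable one); once the last outstanding operation finishes, PI is
 * verified for reads and the bdev_io is completed.
 */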
static void
malloc_done(void *ref, int status)
{
	struct malloc_task *task = (struct malloc_task *)ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);
	int rc;

	if (status != 0) {
		if (status == -ENOMEM) {
			if (task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
				task->status = SPDK_BDEV_IO_STATUS_NOMEM;
			}
		} else {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	if (--task->num_outstanding != 0) {
		return;
	}

	if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
	    bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
	    task->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		rc = malloc_verify_pi(bdev_io);
		if (rc != 0) {
			task->status = SPDK_BDEV_IO_STATUS_FAILED;
		}
	}

	assert(!bdev_io->u.bdev.accel_sequence || task->status == SPDK_BDEV_IO_STATUS_NOMEM);
	spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
}

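/* Queue a task for deferred completion by the channel's poller instead of
 * completing it in the caller's context.
 */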
static void
malloc_complete_task(struct malloc_task *task, struct malloc_channel *mch,
		     enum spdk_bdev_io_status status)
{
	task->status = status;
	TAILQ_INSERT_TAIL(&mch->completed_tasks, task, tailq);
}

static TAILQ_HEAD(, malloc_disk) g_malloc_disks = TAILQ_HEAD_INITIALIZER(g_malloc_disks);

int malloc_disk_count = 0;

static int bdev_malloc_initialize(void);
static void bdev_malloc_deinitialize(void);

static int
bdev_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_task);
}

static struct spdk_bdev_module malloc_if = {
	.name = "malloc",
	.module_init = bdev_malloc_initialize,
	.module_fini = bdev_malloc_deinitialize,
	.get_ctx_size = bdev_malloc_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(malloc, &malloc_if)

static void
malloc_disk_free(struct malloc_disk *malloc_disk)
{
	if (!malloc_disk) {
		return;
	}

	free(malloc_disk->disk.name);
	spdk_free(malloc_disk->malloc_buf);
	spdk_free(malloc_disk->malloc_md_buf);
	free(malloc_disk);
}

static int
bdev_malloc_destruct(void *ctx)
{
	struct malloc_disk *malloc_disk = ctx;

	TAILQ_REMOVE(&g_malloc_disks, malloc_disk, link);
	malloc_disk_free(malloc_disk);
	return 0;
}

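/* Returns non-zero if the iovec list is too short to hold nbytes. */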
static int
bdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (nbytes < iovs[i].iov_len) {
			return 0;
		}

		nbytes -= iovs[i].iov_len;
	}

	return nbytes != 0;
}

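/* Handle a failure to append to, or start, the IO's accel sequence. */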
static void
malloc_sequence_fail(struct malloc_task *task, int status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	/* For ENOMEM, the IO will be retried by the bdev layer, so we don't abort the sequence */
	if (status != -ENOMEM) {
		spdk_accel_sequence_abort(bdev_io->u.bdev.accel_sequence);
		bdev_io->u.bdev.accel_sequence = NULL;
	}

	malloc_done(task, status);
}

static void
malloc_sequence_done(void *ctx, int status)
{
	struct malloc_task *task = ctx;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(task);

	bdev_io->u.bdev.accel_sequence = NULL;
	/* Prevent bdev layer from retrying the request if the sequence failed with ENOMEM */
	malloc_done(task, status != -ENOMEM ? status : -EFAULT);
}

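/* Read: append a copy from the malloc buffer into the caller's iovs onto the
 * IO's accel sequence. The copy must execute before any operations already
 * queued on the sequence, hence the spdk_accel_sequence_reverse() call before
 * finishing it. A separate (non-interleaved) metadata buffer is copied with a
 * plain accel copy outside the sequence.
 */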
static void
bdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		  struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset, md_offset;
	int res = 0;
	size_t md_len;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				      SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "read %" PRIu64 " bytes from offset %#" PRIx64 ", iovcnt=%d\n",
		      len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch,
				     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.memory_domain,
				     bdev_io->u.bdev.memory_domain_ctx,
				     &task->iov, 1, NULL, NULL, 0, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_reverse(bdev_io->u.bdev.accel_sequence);
	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	md_len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
	md_offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;

	SPDK_DEBUGLOG(bdev_malloc, "read metadata %zu bytes from offset %#" PRIx64 "\n",
		      md_len, md_offset);

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, bdev_io->u.bdev.md_buf, mdisk->malloc_md_buf + md_offset,
				     md_len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

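/* Write: append a copy from the caller's iovs into the malloc buffer onto the
 * IO's accel sequence and finish it. No reversal is needed here, since the
 * copy into backing storage belongs at the end of the sequence. A separate
 * metadata buffer is copied with a plain accel copy.
 */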
static void
bdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		   struct malloc_task *task, struct spdk_bdev_io *bdev_io)
{
	uint64_t len, offset, md_offset;
	int res = 0;
	size_t md_len;

	len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
	offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;

	if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
				      SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 0;
	task->iov.iov_base = mdisk->malloc_buf + offset;
	task->iov.iov_len = len;

	SPDK_DEBUGLOG(bdev_malloc, "wrote %" PRIu64 " bytes to offset %#" PRIx64 ", iovcnt=%d\n",
		      len, offset, bdev_io->u.bdev.iovcnt);

	task->num_outstanding++;
	res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch, &task->iov, 1, NULL, NULL,
				     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     bdev_io->u.bdev.memory_domain,
				     bdev_io->u.bdev.memory_domain_ctx, 0, NULL, NULL);
	if (spdk_unlikely(res != 0)) {
		malloc_sequence_fail(task, res);
		return;
	}

	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);

	if (bdev_io->u.bdev.md_buf == NULL) {
		return;
	}

	md_len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
	md_offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;

	SPDK_DEBUGLOG(bdev_malloc, "wrote metadata %zu bytes to offset %#" PRIx64 "\n",
		      md_len, md_offset);

	task->num_outstanding++;
	res = spdk_accel_submit_copy(ch, mdisk->malloc_md_buf + md_offset, bdev_io->u.bdev.md_buf,
				     md_len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

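/* Unmap is implemented as an accel fill with zeroes over the affected range. */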
static int
bdev_malloc_unmap(struct malloc_disk *mdisk,
		  struct spdk_io_channel *ch,
		  struct malloc_task *task,
		  uint64_t offset,
		  uint64_t byte_count)
{
	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	return spdk_accel_submit_fill(ch, mdisk->malloc_buf + offset, 0,
				      byte_count, 0, malloc_done, task);
}

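/* Copy blocks within the same disk using an accel copy between the two
 * offsets in the backing buffer.
 */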
static void
bdev_malloc_copy(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
		 struct malloc_task *task,
		 uint64_t dst_offset, uint64_t src_offset, size_t len)
{
	int64_t res = 0;
	void *dst = mdisk->malloc_buf + dst_offset;
	void *src = mdisk->malloc_buf + src_offset;

	SPDK_DEBUGLOG(bdev_malloc, "Copy %zu bytes from offset %#" PRIx64 " to offset %#" PRIx64 "\n",
		      len, src_offset, dst_offset);

	task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	task->num_outstanding = 1;

	res = spdk_accel_submit_copy(ch, dst, src, len, 0, malloc_done, task);
	if (res != 0) {
		malloc_done(task, res);
	}
}

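/* Dispatch a bdev_io. Returns 0 if the request was handled (even if it will
 * complete with an error) and -1 for unsupported IO types.
 */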
static int
_bdev_malloc_submit_request(struct malloc_channel *mch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_task *task = (struct malloc_task *)bdev_io->driver_ctx;
	struct malloc_disk *disk = bdev_io->bdev->ctxt;
	uint32_t block_size = bdev_io->bdev->blocklen;
	int rc;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
			assert(bdev_io->u.bdev.iovcnt == 1);
			assert(bdev_io->u.bdev.memory_domain == NULL);
			bdev_io->u.bdev.iovs[0].iov_base =
				disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
			malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
			return 0;
		}

		bdev_malloc_readv(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE) {
			rc = malloc_verify_pi(bdev_io);
			if (rc != 0) {
				malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
				return 0;
			}
		}

		bdev_malloc_writev(disk, mch->accel_channel, task, bdev_io);
		return 0;

	case SPDK_BDEV_IO_TYPE_RESET:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_FLUSH:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_UNMAP:
		return bdev_malloc_unmap(disk, mch->accel_channel, task,
					 bdev_io->u.bdev.offset_blocks * block_size,
					 bdev_io->u.bdev.num_blocks * block_size);

	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		/* bdev_malloc_unmap is implemented with a call to mem_cpy_fill which zeroes out all of the requested bytes. */
		return bdev_malloc_unmap(disk, mch->accel_channel, task,
					 bdev_io->u.bdev.offset_blocks * block_size,
					 bdev_io->u.bdev.num_blocks * block_size);

	case SPDK_BDEV_IO_TYPE_ZCOPY:
		if (bdev_io->u.bdev.zcopy.start) {
			void *buf;
			size_t len;

			buf = disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
			len = bdev_io->u.bdev.num_blocks * block_size;
			spdk_bdev_io_set_buf(bdev_io, buf, len);
		}
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;

	case SPDK_BDEV_IO_TYPE_ABORT:
		malloc_complete_task(task, mch, SPDK_BDEV_IO_STATUS_FAILED);
		return 0;

	case SPDK_BDEV_IO_TYPE_COPY:
		bdev_malloc_copy(disk, mch->accel_channel, task,
				 bdev_io->u.bdev.offset_blocks * block_size,
				 bdev_io->u.bdev.copy.src_offset_blocks * block_size,
				 bdev_io->u.bdev.num_blocks * block_size);
		return 0;

	default:
		return -1;
	}
	return 0;
}

static void
bdev_malloc_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct malloc_channel *mch = spdk_io_channel_get_ctx(ch);

	if (_bdev_malloc_submit_request(mch, bdev_io) != 0) {
		malloc_complete_task((struct malloc_task *)bdev_io->driver_ctx, mch,
				     SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
bdev_malloc_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_RESET:
	case SPDK_BDEV_IO_TYPE_UNMAP:
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
	case SPDK_BDEV_IO_TYPE_ZCOPY:
	case SPDK_BDEV_IO_TYPE_ABORT:
	case SPDK_BDEV_IO_TYPE_COPY:
		return true;

	default:
		return false;
	}
}

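/* All malloc disks share one io_device (keyed on g_malloc_disks), so each
 * thread gets a single channel that serves every malloc bdev.
 */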
static struct spdk_io_channel *
bdev_malloc_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_malloc_disks);
}

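/* Emit the JSON-RPC call needed to recreate this bdev on config save. The
 * output has the following shape (values here are illustrative only):
 *
 * {
 *   "method": "bdev_malloc_create",
 *   "params": {
 *     "name": "Malloc0",
 *     "num_blocks": 131072,
 *     "block_size": 512,
 *     "physical_block_size": 512,
 *     "uuid": "...",
 *     "optimal_io_boundary": 0
 *   }
 * }
 */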
static void
bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	char uuid_str[SPDK_UUID_STRING_LEN];

	spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "method", "bdev_malloc_create");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "name", bdev->name);
	spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
	spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
	spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
	spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &bdev->uuid);
	spdk_json_write_named_string(w, "uuid", uuid_str);
	spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);

	spdk_json_write_object_end(w);

	spdk_json_write_object_end(w);
}

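/* Malloc bdevs can accept data in any memory domain because the accel
 * framework performs the copies, so every registered domain is reported.
 * DIF-enabled disks are the exception: PI verification needs direct access to
 * the data (see the assert in malloc_verify_pi()), so they report none.
 */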
static int
bdev_malloc_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct malloc_disk *malloc_disk = ctx;
	struct spdk_memory_domain *domain;
	int num_domains = 0;

	if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
		return 0;
	}

	/* Report support for every memory domain */
	for (domain = spdk_memory_domain_get_first(NULL); domain != NULL;
	     domain = spdk_memory_domain_get_next(domain, NULL)) {
		if (domains != NULL && num_domains < array_size) {
			domains[num_domains] = domain;
		}
		num_domains++;
	}

	return num_domains;
}

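/* Accel sequences are used only for plain reads and writes; DIF-enabled disks
 * opt out because PI is generated and verified outside the accel path.
 */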
static bool
bdev_malloc_accel_sequence_supported(void *ctx, enum spdk_bdev_io_type type)
{
	struct malloc_disk *malloc_disk = ctx;

	if (malloc_disk->disk.dif_type != SPDK_DIF_DISABLE) {
		return false;
	}

	switch (type) {
	case SPDK_BDEV_IO_TYPE_READ:
	case SPDK_BDEV_IO_TYPE_WRITE:
		return true;
	default:
		return false;
	}
}

static const struct spdk_bdev_fn_table malloc_fn_table = {
	.destruct			= bdev_malloc_destruct,
	.submit_request			= bdev_malloc_submit_request,
	.io_type_supported		= bdev_malloc_io_type_supported,
	.get_io_channel			= bdev_malloc_get_io_channel,
	.write_config_json		= bdev_malloc_write_json_config,
	.get_memory_domains		= bdev_malloc_get_memory_domains,
	.accel_sequence_supported	= bdev_malloc_accel_sequence_supported,
};

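/* Write initial protection information for every block of a freshly created
 * DIF/DIX disk, so reads of never-written blocks still pass verification.
 */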
static int
malloc_disk_setup_pi(struct malloc_disk *mdisk)
{
	struct spdk_bdev *bdev = &mdisk->disk;
	struct spdk_dif_ctx dif_ctx;
	struct iovec iov, md_iov;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen,
			       bdev->md_len,
			       bdev->md_interleave,
			       bdev->dif_is_head_of_md,
			       bdev->dif_type,
			       bdev->dif_check_flags,
			       0,	/* configure the whole buffers */
			       0, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF/DIX context failed\n");
		return rc;
	}

	iov.iov_base = mdisk->malloc_buf;
	iov.iov_len = bdev->blockcnt * bdev->blocklen;

	if (mdisk->disk.md_interleave) {
		rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
	} else {
		md_iov.iov_base = mdisk->malloc_md_buf;
		md_iov.iov_len = bdev->blockcnt * bdev->md_len;

		rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Formatting by DIF/DIX failed\n");
	}

	return rc;
}

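/* Validate the options, allocate the backing buffers, and register the bdev.
 * Typically reached via the bdev_malloc_create RPC, e.g. (exact flags may
 * vary by SPDK version):
 *
 *   ./scripts/rpc.py bdev_malloc_create -b Malloc0 64 512
 *
 * which creates a 64 MiB disk named Malloc0 with 512-byte blocks.
 */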
int
create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
{
	struct malloc_disk *mdisk;
	uint32_t block_size;
	int rc;

	assert(opts != NULL);

	if (opts->num_blocks == 0) {
		SPDK_ERRLOG("Disk num_blocks must be greater than 0\n");
		return -EINVAL;
	}

	if (opts->block_size % 512) {
		SPDK_ERRLOG("Data block size must be a multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (opts->physical_block_size % 512) {
		SPDK_ERRLOG("Physical block size must be a multiple of 512 bytes\n");
		return -EINVAL;
	}

	switch (opts->md_size) {
	case 0:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		SPDK_ERRLOG("metadata size %u is not supported\n", opts->md_size);
		return -EINVAL;
	}

	if (opts->md_interleave) {
		block_size = opts->block_size + opts->md_size;
	} else {
		block_size = opts->block_size;
	}

	if (opts->dif_type < SPDK_DIF_DISABLE || opts->dif_type > SPDK_DIF_TYPE3) {
		SPDK_ERRLOG("DIF type is invalid\n");
		return -EINVAL;
	}

	if (opts->dif_type != SPDK_DIF_DISABLE && opts->md_size == 0) {
		SPDK_ERRLOG("Metadata size should not be zero if DIF is enabled\n");
		return -EINVAL;
	}

	mdisk = calloc(1, sizeof(*mdisk));
	if (!mdisk) {
		SPDK_ERRLOG("mdisk calloc() failed\n");
		return -ENOMEM;
	}

	/*
	 * Allocate the large backend memory buffer from pinned memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 *  from on multi-socket systems.
	 */
	mdisk->malloc_buf = spdk_zmalloc(opts->num_blocks * block_size, 2 * 1024 * 1024, NULL,
					 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n");
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}

	if (!opts->md_interleave && opts->md_size != 0) {
		mdisk->malloc_md_buf = spdk_zmalloc(opts->num_blocks * opts->md_size, 2 * 1024 * 1024, NULL,
						    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!mdisk->malloc_md_buf) {
			SPDK_ERRLOG("malloc_md_buf spdk_zmalloc() failed\n");
			malloc_disk_free(mdisk);
			return -ENOMEM;
		}
	}

	if (opts->name) {
		mdisk->disk.name = strdup(opts->name);
	} else {
		/* Auto-generate a name */
		mdisk->disk.name = spdk_sprintf_alloc("Malloc%d", malloc_disk_count);
		malloc_disk_count++;
	}
	if (!mdisk->disk.name) {
		malloc_disk_free(mdisk);
		return -ENOMEM;
	}
	mdisk->disk.product_name = "Malloc disk";

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.phys_blocklen = opts->physical_block_size;
	mdisk->disk.blockcnt = opts->num_blocks;
	mdisk->disk.md_len = opts->md_size;
	mdisk->disk.md_interleave = opts->md_interleave;
	mdisk->disk.dif_type = opts->dif_type;
	mdisk->disk.dif_is_head_of_md = opts->dif_is_head_of_md;
	/* The current block device layer API does not propagate
	 * any DIF related information from the user, so we cannot
	 * generate or verify the Application Tag.
	 */
	switch (opts->dif_type) {
	case SPDK_DIF_TYPE1:
	case SPDK_DIF_TYPE2:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK |
					      SPDK_DIF_FLAGS_REFTAG_CHECK;
		break;
	case SPDK_DIF_TYPE3:
		mdisk->disk.dif_check_flags = SPDK_DIF_FLAGS_GUARD_CHECK;
		break;
	case SPDK_DIF_DISABLE:
		break;
	}

	if (opts->dif_type != SPDK_DIF_DISABLE) {
		rc = malloc_disk_setup_pi(mdisk);
		if (rc) {
			SPDK_ERRLOG("Failed to set up protection information.\n");
			malloc_disk_free(mdisk);
			return rc;
		}
	}

	if (opts->optimal_io_boundary) {
		mdisk->disk.optimal_io_boundary = opts->optimal_io_boundary;
		mdisk->disk.split_on_optimal_io_boundary = true;
	}
	if (!spdk_uuid_is_null(&opts->uuid)) {
		spdk_uuid_copy(&mdisk->disk.uuid, &opts->uuid);
	}

	mdisk->disk.max_copy = 0;
	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;
	mdisk->disk.module = &malloc_if;

	rc = spdk_bdev_register(&mdisk->disk);
	if (rc) {
		malloc_disk_free(mdisk);
		return rc;
	}

	*bdev = &(mdisk->disk);

	TAILQ_INSERT_TAIL(&g_malloc_disks, mdisk, link);

	return rc;
}

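/* Unregister the named bdev; on lookup or unregister failure, the completion
 * callback is invoked directly with the error.
 */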
void
delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg)
{
	int rc;

	rc = spdk_bdev_unregister_by_name(name, &malloc_if, cb_fn, cb_arg);
	if (rc != 0) {
		cb_fn(cb_arg, rc);
	}
}

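/* Drain the channel's list of inline-completed tasks. The list is swapped to
 * a local head first, so completions that queue new tasks do not extend the
 * current iteration.
 */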
static int
malloc_completion_poller(void *ctx)
{
	struct malloc_channel *ch = ctx;
	struct malloc_task *task;
	TAILQ_HEAD(, malloc_task) completed_tasks;
	uint32_t num_completions = 0;

	TAILQ_INIT(&completed_tasks);
	TAILQ_SWAP(&completed_tasks, &ch->completed_tasks, malloc_task, tailq);

	while (!TAILQ_EMPTY(&completed_tasks)) {
		task = TAILQ_FIRST(&completed_tasks);
		TAILQ_REMOVE(&completed_tasks, task, tailq);
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), task->status);
		num_completions++;
	}

	return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

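/* IO channel lifecycle: each channel grabs an accel framework channel and
 * registers the deferred-completion poller.
 */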
static int
malloc_create_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	ch->accel_channel = spdk_accel_get_io_channel();
	if (!ch->accel_channel) {
		SPDK_ERRLOG("Failed to get accel framework's IO channel\n");
		return -ENOMEM;
	}

	ch->completion_poller = SPDK_POLLER_REGISTER(malloc_completion_poller, ch, 0);
	if (!ch->completion_poller) {
		SPDK_ERRLOG("Failed to register malloc completion poller\n");
		spdk_put_io_channel(ch->accel_channel);
		return -ENOMEM;
	}

	TAILQ_INIT(&ch->completed_tasks);

	return 0;
}

static void
malloc_destroy_channel_cb(void *io_device, void *ctx)
{
	struct malloc_channel *ch = ctx;

	assert(TAILQ_EMPTY(&ch->completed_tasks));

	spdk_put_io_channel(ch->accel_channel);
	spdk_poller_unregister(&ch->completion_poller);
}

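/* Module init/fini: register and unregister the shared io_device that backs
 * all malloc channels.
 */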
static int
bdev_malloc_initialize(void)
{
	/* This needs to be reset for each reinitialization of submodules.
	 * Otherwise after enough devices or reinitializations the value gets too high.
	 * TODO: Make malloc bdev name mandatory and remove this counter. */
	malloc_disk_count = 0;

	spdk_io_device_register(&g_malloc_disks, malloc_create_channel_cb,
				malloc_destroy_channel_cb, sizeof(struct malloc_channel),
				"bdev_malloc");

	return 0;
}

static void
bdev_malloc_deinitialize(void)
{
	spdk_io_device_unregister(&g_malloc_disks, NULL);
}

SPDK_LOG_REGISTER_COMPONENT(bdev_malloc)