Lines matching refs: bdev (SPDK malloc bdev module, bdev_malloc.c)

43 	struct spdk_bdev *bdev = bdev_io->bdev;
49 assert(bdev_io->u.bdev.memory_domain == NULL);
51 dif_opts.dif_pi_format = bdev->dif_pi_format;
53 bdev->blocklen,
54 bdev->md_len,
55 bdev->md_interleave,
56 bdev->dif_is_head_of_md,
57 bdev->dif_type,
58 bdev_io->u.bdev.dif_check_flags,
59 bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF,
66 if (spdk_bdev_is_md_interleaved(bdev)) {
69 bdev_io->u.bdev.num_blocks,
75 .iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
78 if (bdev_io->u.bdev.md_buf == NULL) {
85 bdev_io->u.bdev.num_blocks,
93 bdev_io->u.bdev.offset_blocks,
94 bdev_io->u.bdev.num_blocks,
108 bdev_io->u.bdev.iovs,
109 bdev_io->u.bdev.iovcnt,
110 bdev_io->u.bdev.md_buf);
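The fragments above belong to the read-side protection-information check: a DIF context is built from the bdev's format fields and the data (or separate metadata) buffer is verified. A minimal sketch of that flow, assuming the spdk_dif_ctx_init()/spdk_dif_verify()/spdk_dix_verify() signatures from include/spdk/dif.h; verify_pi_sketch() is an illustrative name and the trailing app-tag/guard-seed arguments are typical defaults, not copied from the file:

/*
 * Hedged sketch of the verify-on-read path listed above.  Intended to sit
 * inside the malloc bdev module, so it reuses the module's bdev_io fields.
 */
#include "spdk/bdev_module.h"
#include "spdk/dif.h"
#include "spdk/util.h"

static int
verify_pi_sketch(struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	struct spdk_dif_error err_blk;
	int rc;

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = bdev->dif_pi_format;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen,
			       bdev->md_len,
			       bdev->md_interleave,
			       bdev->dif_is_head_of_md,
			       bdev->dif_type,
			       bdev_io->u.bdev.dif_check_flags,
			       bdev_io->u.bdev.offset_blocks & 0xFFFFFFFF, /* initial ref tag */
			       0xFFFF, 0,	/* app tag mask and value (assumed defaults) */
			       0, 0,		/* data offset, guard seed (assumed defaults) */
			       &dif_opts);
	if (rc != 0) {
		return rc;
	}

	if (spdk_bdev_is_md_interleaved(bdev)) {
		/* PI travels inside each block of the data iovecs. */
		return spdk_dif_verify(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				       bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
	}

	/* Separate metadata: PI lives in md_buf. */
	if (bdev_io->u.bdev.md_buf == NULL) {
		return 0;	/* nothing to verify against */
	}

	struct iovec md_iov = {
		.iov_base = bdev_io->u.bdev.md_buf,
		.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
	};

	return spdk_dix_verify(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
			       &md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
}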
117 struct spdk_bdev *bdev = bdev_io->bdev;
118 struct malloc_disk *mdisk = bdev->ctxt;
121 len = bdev_io->u.bdev.num_blocks * bdev->blocklen;
122 offset = bdev_io->u.bdev.offset_blocks * bdev->blocklen;
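The two fragments above just turn the request's block range into a byte range inside the backing buffer. A tiny illustration of that recurring pattern; malloc_buf_for_io_sketch() is a hypothetical helper, not in the file:

/* Convert the IO's block range into a pointer and byte length in the malloc buffer. */
static void *
malloc_buf_for_io_sketch(struct malloc_disk *mdisk, struct spdk_bdev_io *bdev_io, uint64_t *len)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	uint64_t offset = bdev_io->u.bdev.offset_blocks * bdev->blocklen;

	*len = bdev_io->u.bdev.num_blocks * bdev->blocklen;
	return (char *)mdisk->malloc_buf + offset;
}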
133 struct spdk_bdev *bdev = bdev_io->bdev;
134 struct malloc_disk *mdisk = bdev_io->bdev->ctxt;
135 uint32_t block_size = bdev_io->bdev->blocklen;
142 dif_opts.dif_pi_format = bdev->dif_pi_format;
143 dif_check_flags = bdev->dif_check_flags | SPDK_DIF_CHECK_TYPE_REFTAG |
146 bdev->blocklen,
147 bdev->md_len,
148 bdev->md_interleave,
149 bdev->dif_is_head_of_md,
150 bdev->dif_type,
160 if (bdev->md_interleave) {
162 .iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
163 .iov_len = bdev_io->u.bdev.num_blocks * block_size,
166 rc = spdk_dif_generate(&iov, 1, bdev_io->u.bdev.num_blocks, &dif_ctx);
169 .iov_base = mdisk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size,
170 .iov_len = bdev_io->u.bdev.num_blocks * block_size,
174 .iov_base = mdisk->malloc_md_buf + bdev_io->u.bdev.offset_blocks * bdev->md_len,
175 .iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
178 rc = spdk_dix_generate(&iov, 1, &md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx);
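These fragments are the write-side counterpart: once data has landed in the malloc buffer, protection information is regenerated, either interleaved in place or into the separate metadata buffer. A hedged sketch, assuming the DIF context was initialized as in the earlier sketch and that malloc_buf/malloc_md_buf are the backing buffers named above; generate_pi_sketch() is an illustrative name:

/* Regenerate PI over the blocks just written into the malloc buffer. */
static int
generate_pi_sketch(struct malloc_disk *mdisk, struct spdk_bdev_io *bdev_io,
		   const struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	uint32_t block_size = bdev->blocklen;
	struct iovec iov = {
		.iov_base = (char *)mdisk->malloc_buf +
			    bdev_io->u.bdev.offset_blocks * block_size,
		.iov_len = bdev_io->u.bdev.num_blocks * block_size,
	};

	if (bdev->md_interleave) {
		/* PI is stored inside each block of the data buffer. */
		return spdk_dif_generate(&iov, 1, bdev_io->u.bdev.num_blocks, dif_ctx);
	}

	/* Separate metadata buffer holds the PI (DIX layout). */
	struct iovec md_iov = {
		.iov_base = (char *)mdisk->malloc_md_buf +
			    bdev_io->u.bdev.offset_blocks * bdev->md_len,
		.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
	};

	return spdk_dix_generate(&iov, 1, &md_iov, bdev_io->u.bdev.num_blocks, dif_ctx);
}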
210 if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
241 assert(!bdev_io->u.bdev.accel_sequence || task->status == SPDK_BDEV_IO_STATUS_NOMEM);
318 return bdev_io->u.bdev.num_blocks * bdev_io->bdev->md_len;
324 return bdev_io->u.bdev.offset_blocks * bdev_io->bdev->md_len;
330 struct malloc_disk *mdisk = SPDK_CONTAINEROF(bdev_io->bdev, struct malloc_disk, disk);
332 assert(spdk_bdev_is_md_separate(bdev_io->bdev));
342 /* For ENOMEM, the IO will be retried by the bdev layer, so we don't abort the sequence */
344 spdk_accel_sequence_abort(bdev_io->u.bdev.accel_sequence);
345 bdev_io->u.bdev.accel_sequence = NULL;
357 bdev_io->u.bdev.accel_sequence = NULL;
358 /* Prevent bdev layer from retrying the request if the sequence failed with ENOMEM */
369 len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
370 offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;
372 if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
384 len, offset, bdev_io->u.bdev.iovcnt);
387 res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch,
388 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
389 bdev_io->u.bdev.memory_domain,
390 bdev_io->u.bdev.memory_domain_ctx,
397 spdk_accel_sequence_reverse(bdev_io->u.bdev.accel_sequence);
398 spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);
400 if (bdev_io->u.bdev.md_buf == NULL) {
408 res = spdk_accel_submit_copy(ch, bdev_io->u.bdev.md_buf, malloc_get_md_buf(bdev_io),
422 len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
423 offset = bdev_io->u.bdev.offset_blocks * bdev_io->bdev->blocklen;
425 if (bdev_malloc_check_iov_len(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, len)) {
437 len, offset, bdev_io->u.bdev.iovcnt);
440 res = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, ch, &task->iov, 1, NULL, NULL,
441 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
442 bdev_io->u.bdev.memory_domain,
443 bdev_io->u.bdev.memory_domain_ctx, NULL, NULL);
449 spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, malloc_sequence_done, task);
451 if (bdev_io->u.bdev.md_buf == NULL) {
459 res = spdk_accel_submit_copy(ch, malloc_get_md_buf(bdev_io), bdev_io->u.bdev.md_buf,
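The readv/writev fragments above queue the data movement through an accel sequence instead of a plain memcpy, forwarding the caller's memory-domain information so the copy can reach buffers that are not directly addressable. A sketch of the write direction, assuming the spdk_accel_append_copy()/spdk_accel_sequence_finish() declarations in include/spdk/accel.h; queue_write_sketch(), write_done() and dst_iov are illustrative names:

#include "spdk/accel.h"
#include "spdk/bdev_module.h"

static void
write_done(void *arg, int status)
{
	/* Complete the bdev_io with SUCCESS or FAILED based on status. */
}

static int
queue_write_sketch(struct spdk_io_channel *accel_ch, struct spdk_bdev_io *bdev_io,
		   struct iovec *dst_iov)
{
	int rc;

	/* Destination is the malloc buffer, source is the caller's data. */
	rc = spdk_accel_append_copy(&bdev_io->u.bdev.accel_sequence, accel_ch,
				    dst_iov, 1, NULL, NULL,
				    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				    bdev_io->u.bdev.memory_domain,
				    bdev_io->u.bdev.memory_domain_ctx, NULL, NULL);
	if (rc != 0) {
		return rc;
	}

	/*
	 * The read path reverses the sequence first (spdk_accel_sequence_reverse(),
	 * as the fragments above show) so the copy out of the malloc buffer runs
	 * after any steps appended by upper layers; here the sequence is simply
	 * executed.
	 */
	spdk_accel_sequence_finish(bdev_io->u.bdev.accel_sequence, write_done, bdev_io);
	return 0;
}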
505 struct malloc_disk *disk = bdev_io->bdev->ctxt;
506 uint32_t block_size = bdev_io->bdev->blocklen;
511 if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
512 assert(bdev_io->u.bdev.iovcnt == 1);
513 assert(bdev_io->u.bdev.memory_domain == NULL);
514 bdev_io->u.bdev.iovs[0].iov_base =
515 disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
516 bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
517 if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
525 if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
538 if (bdev_io->bdev->dif_type != SPDK_DIF_DISABLE &&
560 bdev_io->u.bdev.offset_blocks * block_size,
561 bdev_io->u.bdev.num_blocks * block_size);
566 bdev_io->u.bdev.offset_blocks * block_size,
567 bdev_io->u.bdev.num_blocks * block_size);
570 if (bdev_io->u.bdev.zcopy.start) {
574 buf = disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
575 len = bdev_io->u.bdev.num_blocks * block_size;
577 if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
589 bdev_io->u.bdev.offset_blocks * block_size,
590 bdev_io->u.bdev.copy.src_offset_blocks * block_size,
591 bdev_io->u.bdev.num_blocks * block_size);
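In the submit path above, a read that arrives without a data buffer is served zero-copy by pointing the request's iovec straight at the backing memory instead of allocating and copying. A minimal sketch of that trick; fill_iov_from_backing_buf_sketch() is a hypothetical name:

#include <assert.h>

/* Serve the IO directly from the malloc backing buffer when no buffer was supplied. */
static void
fill_iov_from_backing_buf_sketch(struct malloc_disk *disk, struct spdk_bdev_io *bdev_io)
{
	uint32_t block_size = bdev_io->bdev->blocklen;

	if (bdev_io->u.bdev.iovs[0].iov_base == NULL) {
		assert(bdev_io->u.bdev.iovcnt == 1);
		bdev_io->u.bdev.iovs[0].iov_base =
			(char *)disk->malloc_buf + bdev_io->u.bdev.offset_blocks * block_size;
		bdev_io->u.bdev.iovs[0].iov_len = bdev_io->u.bdev.num_blocks * block_size;
	}
}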
638 bdev_malloc_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
645 spdk_json_write_named_string(w, "name", bdev->name);
646 spdk_json_write_named_uint64(w, "num_blocks", bdev->blockcnt);
647 spdk_json_write_named_uint32(w, "block_size", bdev->blocklen);
648 spdk_json_write_named_uint32(w, "physical_block_size", bdev->phys_blocklen);
649 spdk_json_write_named_uuid(w, "uuid", &bdev->uuid);
650 spdk_json_write_named_uint32(w, "optimal_io_boundary", bdev->optimal_io_boundary);
651 spdk_json_write_named_uint32(w, "md_size", bdev->md_len);
652 spdk_json_write_named_uint32(w, "dif_type", bdev->dif_type);
653 spdk_json_write_named_bool(w, "dif_is_head_of_md", bdev->dif_is_head_of_md);
654 spdk_json_write_named_uint32(w, "dif_pi_format", bdev->dif_pi_format);
709 struct spdk_bdev *bdev = &mdisk->disk;
717 dif_opts.dif_pi_format = bdev->dif_pi_format;
718 /* Set APPTAG|REFTAG_IGNORE to PI fields after creation of malloc bdev */
719 dif_check_flags = bdev->dif_check_flags | SPDK_DIF_CHECK_TYPE_REFTAG |
722 bdev->blocklen,
723 bdev->md_len,
724 bdev->md_interleave,
725 bdev->dif_is_head_of_md,
726 bdev->dif_type,
737 iov.iov_len = bdev->blockcnt * bdev->blocklen;
740 rc = spdk_dif_generate(&iov, 1, bdev->blockcnt, &dif_ctx);
743 md_iov.iov_len = bdev->blockcnt * bdev->md_len;
745 rc = spdk_dix_generate(&iov, 1, &md_iov, bdev->blockcnt, &dif_ctx);
756 create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts)
895 *bdev = &(mdisk->disk);
973 * TODO: Make malloc bdev name mandatory and remove this counter. */
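Finally, create_malloc_disk() builds the malloc disk and hands back the embedded spdk_bdev. An illustrative caller; the field names of struct malloc_bdev_opts are assumptions inferred from the JSON parameters above, not taken from the header:

/* Hypothetical caller; opts field names are assumed (name, num_blocks, block_size). */
static int
create_example_malloc_bdev(void)
{
	struct malloc_bdev_opts opts = {0};
	struct spdk_bdev *bdev = NULL;
	int rc;

	opts.name = "Malloc0";		/* optional today; see the TODO above */
	opts.num_blocks = 32 * 1024;	/* 16 MiB at 512-byte blocks */
	opts.block_size = 512;

	rc = create_malloc_disk(&bdev, &opts);
	if (rc != 0) {
		return rc;
	}
	/* bdev now points at mdisk->disk and can be used like any registered bdev. */
	return 0;
}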