xref: /spdk/module/bdev/raid/raid1.c (revision 838e61c3772fdefb17e1a0b8f9880e2bcb9c4c0d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "bdev_raid.h"
7 
8 #include "spdk/likely.h"
9 #include "spdk/log.h"
10 
/* Per-raid-bdev private state for the RAID1 module; stored in
 * raid_bdev->module_private by raid1_start() and freed by raid1_stop(). */
struct raid1_info {
	/* The parent raid bdev */
	struct raid_bdev *raid_bdev;
};
15 
16 static void
17 raid1_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
18 {
19 	struct raid_bdev_io *raid_io = cb_arg;
20 
21 	spdk_bdev_free_io(bdev_io);
22 
23 	raid_bdev_io_complete_part(raid_io, 1, success ?
24 				   SPDK_BDEV_IO_STATUS_SUCCESS :
25 				   SPDK_BDEV_IO_STATUS_FAILED);
26 }
27 
28 static void raid1_submit_rw_request(struct raid_bdev_io *raid_io);
29 
/* void* trampoline so the submit path can be used as an spdk_bdev_io_wait_cb
 * when a base bdev returns -ENOMEM and the I/O must be retried later. */
static void
_raid1_submit_rw_request(void *_raid_io)
{
	raid1_submit_rw_request((struct raid_bdev_io *)_raid_io);
}
37 
38 static void
39 raid1_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
40 {
41 	memset(opts, 0, sizeof(*opts));
42 	opts->size = sizeof(*opts);
43 	opts->memory_domain = bdev_io->u.bdev.memory_domain;
44 	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
45 	opts->metadata = bdev_io->u.bdev.md_buf;
46 }
47 
48 static int
49 raid1_submit_read_request(struct raid_bdev_io *raid_io)
50 {
51 	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
52 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
53 	struct spdk_bdev_ext_io_opts io_opts;
54 	uint8_t ch_idx = 0;
55 	struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[ch_idx];
56 	struct spdk_io_channel *base_ch = raid_io->raid_ch->base_channel[ch_idx];
57 	uint64_t pd_lba, pd_blocks;
58 	int ret;
59 
60 	pd_lba = bdev_io->u.bdev.offset_blocks;
61 	pd_blocks = bdev_io->u.bdev.num_blocks;
62 
63 	raid_io->base_bdev_io_remaining = 1;
64 
65 	raid1_init_ext_io_opts(bdev_io, &io_opts);
66 	ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
67 					 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
68 					 pd_lba, pd_blocks, raid1_bdev_io_completion,
69 					 raid_io, &io_opts);
70 
71 	if (spdk_likely(ret == 0)) {
72 		raid_io->base_bdev_io_submitted++;
73 	} else if (spdk_unlikely(ret == -ENOMEM)) {
74 		raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
75 					_raid1_submit_rw_request);
76 		return 0;
77 	}
78 
79 	return ret;
80 }
81 
/*
 * Mirror a write to every base bdev. This function is re-entrant for retries:
 * it resumes submission at index base_bdev_io_submitted, so a request queued
 * after -ENOMEM continues from the first unsubmitted base bdev rather than
 * duplicating writes already in flight.
 *
 * Returns 0 on success or when the request was queued/failed internally;
 * a negative errno is never returned from the loop below (ret is 0 on the
 * final iteration path), so the trailing return is effectively 0.
 */
static int
raid1_submit_write_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct spdk_bdev_ext_io_opts io_opts;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;
	uint64_t pd_lba, pd_blocks;
	/* Resume point: number of base bdevs already submitted to. */
	uint16_t idx = raid_io->base_bdev_io_submitted;
	uint64_t base_bdev_io_not_submitted;
	int ret = 0;

	/* RAID1 mirrors 1:1 — the parent LBA/length map directly to each base bdev. */
	pd_lba = bdev_io->u.bdev.offset_blocks;
	pd_blocks = bdev_io->u.bdev.num_blocks;

	/* Only initialize the completion count on first entry, not on retries. */
	if (raid_io->base_bdev_io_submitted == 0) {
		raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
	}

	raid1_init_ext_io_opts(bdev_io, &io_opts);
	for (; idx < raid_bdev->num_base_bdevs; idx++) {
		base_info = &raid_bdev->base_bdev_info[idx];
		base_ch = raid_io->raid_ch->base_channel[idx];

		ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
						  bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						  pd_lba, pd_blocks, raid1_bdev_io_completion,
						  raid_io, &io_opts);
		if (spdk_unlikely(ret != 0)) {
			if (spdk_unlikely(ret == -ENOMEM)) {
				/* Out of bdev_ios: queue for retry; submission
				 * will resume at the current idx. */
				raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
							_raid1_submit_rw_request);
				return 0;
			}

			/* Hard submission error: account all unsubmitted base
			 * I/Os as failed so the parent I/O can complete once
			 * the already-submitted ones finish. */
			base_bdev_io_not_submitted = raid_bdev->num_base_bdevs -
						     raid_io->base_bdev_io_submitted;
			raid_bdev_io_complete_part(raid_io, base_bdev_io_not_submitted,
						   SPDK_BDEV_IO_STATUS_FAILED);
			return 0;
		}

		raid_io->base_bdev_io_submitted++;
	}

	return ret;
}
130 
131 static void
132 raid1_submit_rw_request(struct raid_bdev_io *raid_io)
133 {
134 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
135 	int ret;
136 
137 	switch (bdev_io->type) {
138 	case SPDK_BDEV_IO_TYPE_READ:
139 		ret = raid1_submit_read_request(raid_io);
140 		break;
141 	case SPDK_BDEV_IO_TYPE_WRITE:
142 		ret = raid1_submit_write_request(raid_io);
143 		break;
144 	default:
145 		ret = -EINVAL;
146 		break;
147 	}
148 
149 	if (spdk_unlikely(ret != 0)) {
150 		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
151 	}
152 }
153 
154 static int
155 raid1_start(struct raid_bdev *raid_bdev)
156 {
157 	uint64_t min_blockcnt = UINT64_MAX;
158 	struct raid_base_bdev_info *base_info;
159 	struct raid1_info *r1info;
160 
161 	r1info = calloc(1, sizeof(*r1info));
162 	if (!r1info) {
163 		SPDK_ERRLOG("Failed to allocate RAID1 info device structure\n");
164 		return -ENOMEM;
165 	}
166 	r1info->raid_bdev = raid_bdev;
167 
168 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
169 		min_blockcnt = spdk_min(min_blockcnt, base_info->bdev->blockcnt);
170 	}
171 
172 	raid_bdev->bdev.blockcnt = min_blockcnt;
173 	raid_bdev->module_private = r1info;
174 
175 	return 0;
176 }
177 
178 static bool
179 raid1_stop(struct raid_bdev *raid_bdev)
180 {
181 	struct raid1_info *r1info = raid_bdev->module_private;
182 
183 	free(r1info);
184 
185 	return true;
186 }
187 
/* RAID1 module descriptor: mirroring requires at least 2 base bdevs; the
 * constraint declares the array operational with a minimum of 1 member.
 * Memory domains are supported since ext I/O opts forward them (see
 * raid1_init_ext_io_opts). */
static struct raid_bdev_module g_raid1_module = {
	.level = RAID1,
	.base_bdevs_min = 2,
	.base_bdevs_constraint = {CONSTRAINT_MIN_BASE_BDEVS_OPERATIONAL, 1},
	.memory_domains_supported = true,
	.start = raid1_start,
	.stop = raid1_stop,
	.submit_rw_request = raid1_submit_rw_request,
};
RAID_MODULE_REGISTER(&g_raid1_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid1)
200