xref: /spdk/module/bdev/raid/raid1.c (revision 69038a944dd521e7f620676a916faef0f35e4ed1)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "bdev_raid.h"
7 
8 #include "spdk/likely.h"
9 #include "spdk/log.h"
10 
/* Per-raid-bdev private context for the RAID1 module, stored in
 * raid_bdev->module_private by raid1_start() and freed by raid1_stop(). */
struct raid1_info {
	/* The parent raid bdev */
	struct raid_bdev *raid_bdev;
};
15 
16 static void
17 raid1_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
18 {
19 	struct raid_bdev_io *raid_io = cb_arg;
20 
21 	spdk_bdev_free_io(bdev_io);
22 
23 	raid_bdev_io_complete_part(raid_io, 1, success ?
24 				   SPDK_BDEV_IO_STATUS_SUCCESS :
25 				   SPDK_BDEV_IO_STATUS_FAILED);
26 }
27 
28 static void raid1_submit_rw_request(struct raid_bdev_io *raid_io);
29 
/* Trampoline used by the io_wait machinery: retries a raid I/O that was
 * previously queued because a base bdev returned -ENOMEM. */
static void
_raid1_submit_rw_request(void *_raid_io)
{
	raid1_submit_rw_request((struct raid_bdev_io *)_raid_io);
}
37 
/*
 * Submit a read for the raid I/O to a single base bdev.
 *
 * In this revision reads are always served from base bdev 0 (no read
 * balancing).  NOTE(review): this assumes base bdev 0 and its channel are
 * valid -- degraded-mode handling is not visible here, confirm upstream.
 *
 * Returns 0 on successful submission or when the request was queued for
 * retry after -ENOMEM; any other negative errno is propagated to the caller.
 */
static int
raid1_submit_read_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	/* Fixed read target: always the first base bdev. */
	uint8_t ch_idx = 0;
	struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[ch_idx];
	struct spdk_io_channel *base_ch = raid_io->raid_ch->base_channel[ch_idx];
	uint64_t pd_lba, pd_blocks;
	int ret;

	/* RAID1 mirrors the full LBA range, so no offset/length translation. */
	pd_lba = bdev_io->u.bdev.offset_blocks;
	pd_blocks = bdev_io->u.bdev.num_blocks;

	/* A read fans out to exactly one base bdev; set the part count before
	 * submission so the completion callback can finish the raid I/O. */
	raid_io->base_bdev_io_remaining = 1;

	ret = spdk_bdev_readv_blocks(base_info->desc, base_ch,
				     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
				     pd_lba, pd_blocks,
				     raid1_bdev_io_completion, raid_io);

	if (spdk_likely(ret == 0)) {
		raid_io->base_bdev_io_submitted++;
	} else if (spdk_unlikely(ret == -ENOMEM)) {
		/* Out of bdev_io resources: park the request on the base bdev's
		 * io_wait queue and resubmit later via _raid1_submit_rw_request. */
		raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
					_raid1_submit_rw_request);
		return 0;
	}

	return ret;
}
69 
/*
 * Submit the write for the raid I/O to every base bdev (mirroring).
 *
 * This function may be invoked multiple times for the same raid I/O: on
 * -ENOMEM it queues itself for retry and, when re-entered, resumes the
 * fan-out from base_bdev_io_submitted, which doubles as the loop cursor.
 *
 * Returns 0 on success, when queued for retry, or when the raid I/O was
 * failed in-line; a non-zero return here is only possible if the last
 * submission fails with an error other than -ENOMEM.
 */
static int
raid1_submit_write_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;
	uint64_t pd_lba, pd_blocks;
	/* Resume point: index of the first base bdev not yet submitted to. */
	uint16_t idx = raid_io->base_bdev_io_submitted;
	uint64_t base_bdev_io_not_submitted;
	int ret = 0;

	/* RAID1 mirrors the full LBA range, so no offset/length translation. */
	pd_lba = bdev_io->u.bdev.offset_blocks;
	pd_blocks = bdev_io->u.bdev.num_blocks;

	/* Only initialize the part count on the first pass; a retry after
	 * -ENOMEM must not reset it, as completions may already be in flight. */
	if (raid_io->base_bdev_io_submitted == 0) {
		raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
	}

	for (; idx < raid_bdev->num_base_bdevs; idx++) {
		base_info = &raid_bdev->base_bdev_info[idx];
		base_ch = raid_io->raid_ch->base_channel[idx];

		ret = spdk_bdev_writev_blocks(base_info->desc, base_ch,
					      bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					      pd_lba, pd_blocks,
					      raid1_bdev_io_completion, raid_io);

		if (spdk_unlikely(ret != 0)) {
			if (spdk_unlikely(ret == -ENOMEM)) {
				/* Park on the io_wait queue; the retry resumes
				 * the loop from base_bdev_io_submitted. */
				raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
							_raid1_submit_rw_request);
				return 0;
			}

			/* Hard submission error: account all not-yet-submitted
			 * parts as failed so the raid I/O can complete once the
			 * already-submitted children finish. */
			base_bdev_io_not_submitted = raid_bdev->num_base_bdevs -
						     raid_io->base_bdev_io_submitted;
			raid_bdev_io_complete_part(raid_io, base_bdev_io_not_submitted,
						   SPDK_BDEV_IO_STATUS_FAILED);
			return 0;
		}

		raid_io->base_bdev_io_submitted++;
	}

	return ret;
}
117 
118 static void
119 raid1_submit_rw_request(struct raid_bdev_io *raid_io)
120 {
121 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
122 	int ret;
123 
124 	switch (bdev_io->type) {
125 	case SPDK_BDEV_IO_TYPE_READ:
126 		ret = raid1_submit_read_request(raid_io);
127 		break;
128 	case SPDK_BDEV_IO_TYPE_WRITE:
129 		ret = raid1_submit_write_request(raid_io);
130 		break;
131 	default:
132 		ret = -EINVAL;
133 		break;
134 	}
135 
136 	if (spdk_unlikely(ret != 0)) {
137 		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
138 	}
139 }
140 
141 static int
142 raid1_start(struct raid_bdev *raid_bdev)
143 {
144 	uint64_t min_blockcnt = UINT64_MAX;
145 	struct raid_base_bdev_info *base_info;
146 	struct raid1_info *r1info;
147 
148 	r1info = calloc(1, sizeof(*r1info));
149 	if (!r1info) {
150 		SPDK_ERRLOG("Failed to allocate RAID1 info device structure\n");
151 		return -ENOMEM;
152 	}
153 	r1info->raid_bdev = raid_bdev;
154 
155 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
156 		min_blockcnt = spdk_min(min_blockcnt, base_info->bdev->blockcnt);
157 	}
158 
159 	raid_bdev->bdev.blockcnt = min_blockcnt;
160 	raid_bdev->module_private = r1info;
161 
162 	return 0;
163 }
164 
165 static bool
166 raid1_stop(struct raid_bdev *raid_bdev)
167 {
168 	struct raid1_info *r1info = raid_bdev->module_private;
169 
170 	free(r1info);
171 
172 	return true;
173 }
174 
/* RAID1 module descriptor: requires at least two base bdevs and stays
 * operational as long as at least one base bdev remains (mirroring). */
static struct raid_bdev_module g_raid1_module = {
	.level = RAID1,
	.base_bdevs_min = 2,
	.base_bdevs_constraint = {CONSTRAINT_MIN_BASE_BDEVS_OPERATIONAL, 1},
	.start = raid1_start,
	.stop = raid1_stop,
	.submit_rw_request = raid1_submit_rw_request,
};
RAID_MODULE_REGISTER(&g_raid1_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid1)
186