xref: /spdk/test/unit/lib/blob/bs_dev_common.c (revision 0098e636761237b77c12c30c2408263a5d2260cc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "thread/thread_internal.h"
7 #include "bs_scheduler.c"
8 
9 
/* Geometry of the simulated device: a 64 MiB RAM buffer of 4 KiB blocks. */
#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
uint8_t *g_dev_buffer;		/* backing storage; allocated by the test harness */
uint64_t g_dev_write_bytes;	/* running total of bytes written, for test accounting */
uint64_t g_dev_read_bytes;	/* running total of bytes read, for test accounting */
bool g_dev_writev_ext_called;	/* set when the writev_ext path is exercised */
bool g_dev_readv_ext_called;	/* set when the readv_ext path is exercised */
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;	/* last ext io opts seen by an *_ext call */
19 
/* Running counts of simulated IO operations. Each counter only advances
 * while its corresponding threshold is armed (non-zero); once a counter
 * reaches its threshold the device starts failing IO with -EIO. */
struct spdk_power_failure_counters {
	uint64_t general_counter;	/* counts IO of any kind */
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};
30 
/* Per-operation trigger points for the simulated power failure.
 * 0 means that trigger is disabled; see dev_set_power_failure_thresholds(). */
struct spdk_power_failure_thresholds {
	uint64_t general_threshold;	/* applies to IO of any kind */
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};
41 
/* Status handed to every IO completion: 0 on success, -EIO once a
 * simulated power failure has triggered (sticky until a reset call). */
static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
47 
48 void
49 dev_reset_power_failure_event(void)
50 {
51 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
52 	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
53 	g_power_failure_rc = 0;
54 }
55 
56 void
57 dev_reset_power_failure_counters(void)
58 {
59 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
60 	g_power_failure_rc = 0;
61 }
62 
63 /**
64  * Set power failure event. Power failure will occur after given number
65  * of IO operations. It may occur after number of particular operations
66  * (read, write, unmap, write zero or flush) or after given number of
67  * any IO operations (general_threshold). Value 0 means that the threshold
68  * is disabled. Any other value is the number of operation starting from
69  * which power failure event will happen.
70  */
71 void
72 dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
73 {
74 	g_power_failure_thresholds = thresholds;
75 }
76 
/* Single IO channel shared by every UT device; defined here for UT only. */
struct spdk_io_channel g_io_channel;
79 
80 static struct spdk_io_channel *
81 dev_create_channel(struct spdk_bs_dev *dev)
82 {
83 	return &g_io_channel;
84 }
85 
/* The shared UT channel is statically allocated, so there is nothing
 * to tear down here. */
static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
	(void)dev;
	(void)channel;
}
90 
/* Release the device structure allocated by init_dev(). */
static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
96 
97 
98 static void
99 dev_complete_cb(void *arg)
100 {
101 	struct spdk_bs_dev_cb_args *cb_args = arg;
102 
103 	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
104 }
105 
106 static void
107 dev_complete(void *arg)
108 {
109 	_bs_send_msg(dev_complete_cb, arg, NULL);
110 }
111 
112 static void
113 dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
114 	 uint64_t lba, uint32_t lba_count,
115 	 struct spdk_bs_dev_cb_args *cb_args)
116 {
117 	uint64_t offset, length;
118 
119 	if (g_power_failure_thresholds.read_threshold != 0) {
120 		g_power_failure_counters.read_counter++;
121 	}
122 
123 	if (g_power_failure_thresholds.general_threshold != 0) {
124 		g_power_failure_counters.general_counter++;
125 	}
126 
127 	if ((g_power_failure_thresholds.read_threshold == 0 ||
128 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
129 	    (g_power_failure_thresholds.general_threshold == 0 ||
130 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
131 		offset = lba * dev->blocklen;
132 		length = lba_count * dev->blocklen;
133 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
134 
135 		if (length > 0) {
136 			memcpy(payload, &g_dev_buffer[offset], length);
137 			g_dev_read_bytes += length;
138 		}
139 	} else {
140 		g_power_failure_rc = -EIO;
141 	}
142 
143 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
144 }
145 
146 static void
147 dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
148 	  uint64_t lba, uint32_t lba_count,
149 	  struct spdk_bs_dev_cb_args *cb_args)
150 {
151 	uint64_t offset, length;
152 
153 	if (g_power_failure_thresholds.write_threshold != 0) {
154 		g_power_failure_counters.write_counter++;
155 	}
156 
157 	if (g_power_failure_thresholds.general_threshold != 0) {
158 		g_power_failure_counters.general_counter++;
159 	}
160 
161 	if ((g_power_failure_thresholds.write_threshold == 0 ||
162 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
163 	    (g_power_failure_thresholds.general_threshold == 0 ||
164 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
165 		offset = lba * dev->blocklen;
166 		length = lba_count * dev->blocklen;
167 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
168 
169 		memcpy(&g_dev_buffer[offset], payload, length);
170 		g_dev_write_bytes += length;
171 	} else {
172 		g_power_failure_rc = -EIO;
173 	}
174 
175 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
176 }
177 
/* Assert that the iovec entries add up to exactly the expected
 * transfer length. */
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	uint64_t total = 0;
	int idx;

	for (idx = 0; idx < iovcnt; idx++) {
		total += iov[idx].iov_len;
	}

	CU_ASSERT(total == length);
}
189 
190 static void
191 dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
192 	  struct iovec *iov, int iovcnt,
193 	  uint64_t lba, uint32_t lba_count,
194 	  struct spdk_bs_dev_cb_args *cb_args)
195 {
196 	uint64_t offset, length;
197 	int i;
198 
199 	if (g_power_failure_thresholds.read_threshold != 0) {
200 		g_power_failure_counters.read_counter++;
201 	}
202 
203 	if (g_power_failure_thresholds.general_threshold != 0) {
204 		g_power_failure_counters.general_counter++;
205 	}
206 
207 	if ((g_power_failure_thresholds.read_threshold == 0 ||
208 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
209 	    (g_power_failure_thresholds.general_threshold == 0 ||
210 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
211 		offset = lba * dev->blocklen;
212 		length = lba_count * dev->blocklen;
213 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
214 		__check_iov(iov, iovcnt, length);
215 
216 		for (i = 0; i < iovcnt; i++) {
217 			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
218 			offset += iov[i].iov_len;
219 		}
220 
221 		g_dev_read_bytes += length;
222 	} else {
223 		g_power_failure_rc = -EIO;
224 	}
225 
226 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
227 }
228 
229 static void
230 dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
231 	      struct iovec *iov, int iovcnt,
232 	      uint64_t lba, uint32_t lba_count,
233 	      struct spdk_bs_dev_cb_args *cb_args,
234 	      struct spdk_blob_ext_io_opts *io_opts)
235 {
236 	g_dev_readv_ext_called = true;
237 	g_blob_ext_io_opts = *io_opts;
238 	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
239 }
240 
241 static void
242 dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
243 	   struct iovec *iov, int iovcnt,
244 	   uint64_t lba, uint32_t lba_count,
245 	   struct spdk_bs_dev_cb_args *cb_args)
246 {
247 	uint64_t offset, length;
248 	int i;
249 
250 	if (g_power_failure_thresholds.write_threshold != 0) {
251 		g_power_failure_counters.write_counter++;
252 	}
253 
254 	if (g_power_failure_thresholds.general_threshold != 0) {
255 		g_power_failure_counters.general_counter++;
256 	}
257 
258 	if ((g_power_failure_thresholds.write_threshold == 0 ||
259 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold)  &&
260 	    (g_power_failure_thresholds.general_threshold == 0 ||
261 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
262 		offset = lba * dev->blocklen;
263 		length = lba_count * dev->blocklen;
264 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
265 		__check_iov(iov, iovcnt, length);
266 
267 		for (i = 0; i < iovcnt; i++) {
268 			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
269 			offset += iov[i].iov_len;
270 		}
271 
272 		g_dev_write_bytes += length;
273 	} else {
274 		g_power_failure_rc = -EIO;
275 	}
276 
277 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
278 }
279 
280 static void
281 dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
282 	       struct iovec *iov, int iovcnt,
283 	       uint64_t lba, uint32_t lba_count,
284 	       struct spdk_bs_dev_cb_args *cb_args,
285 	       struct spdk_blob_ext_io_opts *io_opts)
286 {
287 	g_dev_writev_ext_called = true;
288 	g_blob_ext_io_opts = *io_opts;
289 	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
290 }
291 
292 static void
293 dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
294 	  struct spdk_bs_dev_cb_args *cb_args)
295 {
296 	if (g_power_failure_thresholds.flush_threshold != 0) {
297 		g_power_failure_counters.flush_counter++;
298 	}
299 
300 	if (g_power_failure_thresholds.general_threshold != 0) {
301 		g_power_failure_counters.general_counter++;
302 	}
303 
304 	if ((g_power_failure_thresholds.flush_threshold != 0 &&
305 	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold)  ||
306 	    (g_power_failure_thresholds.general_threshold != 0 &&
307 	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
308 		g_power_failure_rc = -EIO;
309 	}
310 
311 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
312 }
313 
314 static void
315 dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
316 	  uint64_t lba, uint64_t lba_count,
317 	  struct spdk_bs_dev_cb_args *cb_args)
318 {
319 	uint64_t offset, length;
320 
321 	if (g_power_failure_thresholds.unmap_threshold != 0) {
322 		g_power_failure_counters.unmap_counter++;
323 	}
324 
325 	if (g_power_failure_thresholds.general_threshold != 0) {
326 		g_power_failure_counters.general_counter++;
327 	}
328 
329 	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
330 	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold)  &&
331 	    (g_power_failure_thresholds.general_threshold == 0 ||
332 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
333 		offset = lba * dev->blocklen;
334 		length = lba_count * dev->blocklen;
335 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
336 		memset(&g_dev_buffer[offset], 0, length);
337 	} else {
338 		g_power_failure_rc = -EIO;
339 	}
340 
341 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
342 }
343 
344 static void
345 dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
346 		 uint64_t lba, uint64_t lba_count,
347 		 struct spdk_bs_dev_cb_args *cb_args)
348 {
349 	uint64_t offset, length;
350 
351 	if (g_power_failure_thresholds.write_zero_threshold != 0) {
352 		g_power_failure_counters.write_zero_counter++;
353 	}
354 
355 	if (g_power_failure_thresholds.general_threshold != 0) {
356 		g_power_failure_counters.general_counter++;
357 	}
358 
359 	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
360 	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold)  &&
361 	    (g_power_failure_thresholds.general_threshold == 0 ||
362 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
363 		offset = lba * dev->blocklen;
364 		length = lba_count * dev->blocklen;
365 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
366 		memset(&g_dev_buffer[offset], 0, length);
367 		g_dev_write_bytes += length;
368 	} else {
369 		g_power_failure_rc = -EIO;
370 	}
371 
372 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
373 }
374 
375 static struct spdk_bs_dev *
376 init_dev(void)
377 {
378 	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
379 
380 	SPDK_CU_ASSERT_FATAL(dev != NULL);
381 
382 	dev->create_channel = dev_create_channel;
383 	dev->destroy_channel = dev_destroy_channel;
384 	dev->destroy = dev_destroy;
385 	dev->read = dev_read;
386 	dev->write = dev_write;
387 	dev->readv = dev_readv;
388 	dev->writev = dev_writev;
389 	dev->readv_ext = dev_readv_ext;
390 	dev->writev_ext = dev_writev_ext;
391 	dev->flush = dev_flush;
392 	dev->unmap = dev_unmap;
393 	dev->write_zeroes = dev_write_zeroes;
394 	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
395 	dev->blocklen = DEV_BUFFER_BLOCKLEN;
396 
397 	return dev;
398 }
399