xref: /spdk/test/unit/lib/blob/bs_dev_common.c (revision 2f5c602574a98ede645991abe279a96e19c50196)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "thread/thread_internal.h"
35 #include "bs_scheduler.c"
36 
37 
#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
/* In-memory backing store emulating the block device for these unit tests.
 * Allocated/owned by the test harness (allocation is outside this file). */
uint8_t *g_dev_buffer;
/* Running totals of bytes transferred through the fake device; tests read
 * these to verify IO accounting. */
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;

/* Number of operations of each kind observed so far. A counter is only
 * incremented while the corresponding threshold below is non-zero. */
struct spdk_power_failure_counters {
	uint64_t general_counter;
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};

/* Operation counts at which a simulated power failure begins. A value of 0
 * disables the threshold for that operation kind; general_threshold applies
 * across all IO kinds. */
struct spdk_power_failure_thresholds {
	uint64_t general_threshold;
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};

/* Completion status passed to IO callbacks: 0 normally, -EIO once any
 * configured threshold has been crossed. */
static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
72 
73 void
74 dev_reset_power_failure_event(void)
75 {
76 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
77 	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
78 	g_power_failure_rc = 0;
79 }
80 
81 void
82 dev_reset_power_failure_counters(void)
83 {
84 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
85 	g_power_failure_rc = 0;
86 }
87 
/**
 * Set power failure event. Power failure will occur after a given number
 * of IO operations. It may occur after a number of particular operations
 * (read, write, unmap, write zero or flush) or after a given number of
 * any IO operations (general_threshold). Value 0 means that the threshold
 * is disabled. Any other value is the number of the operation starting from
 * which the power failure event will happen.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
101 
102 /* Define here for UT only. */
103 struct spdk_io_channel g_io_channel;
104 
105 static struct spdk_io_channel *
106 dev_create_channel(struct spdk_bs_dev *dev)
107 {
108 	return &g_io_channel;
109 }
110 
/*
 * Channel teardown for the fake device. Intentionally a no-op: the shared
 * UT channel is statically allocated, so there is nothing to release.
 */
static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}
115 
/*
 * Release a device created by init_dev(). Only the spdk_bs_dev struct
 * itself is heap-allocated; g_dev_buffer is owned by the test harness.
 */
static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
121 
122 
123 static void
124 dev_complete_cb(void *arg)
125 {
126 	struct spdk_bs_dev_cb_args *cb_args = arg;
127 
128 	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
129 }
130 
131 static void
132 dev_complete(void *arg)
133 {
134 	_bs_send_msg(dev_complete_cb, arg, NULL);
135 }
136 
137 static void
138 dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
139 	 uint64_t lba, uint32_t lba_count,
140 	 struct spdk_bs_dev_cb_args *cb_args)
141 {
142 	uint64_t offset, length;
143 
144 	if (g_power_failure_thresholds.read_threshold != 0) {
145 		g_power_failure_counters.read_counter++;
146 	}
147 
148 	if (g_power_failure_thresholds.general_threshold != 0) {
149 		g_power_failure_counters.general_counter++;
150 	}
151 
152 	if ((g_power_failure_thresholds.read_threshold == 0 ||
153 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
154 	    (g_power_failure_thresholds.general_threshold == 0 ||
155 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
156 		offset = lba * dev->blocklen;
157 		length = lba_count * dev->blocklen;
158 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
159 
160 		if (length > 0) {
161 			memcpy(payload, &g_dev_buffer[offset], length);
162 			g_dev_read_bytes += length;
163 		}
164 	} else {
165 		g_power_failure_rc = -EIO;
166 	}
167 
168 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
169 }
170 
171 static void
172 dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
173 	  uint64_t lba, uint32_t lba_count,
174 	  struct spdk_bs_dev_cb_args *cb_args)
175 {
176 	uint64_t offset, length;
177 
178 	if (g_power_failure_thresholds.write_threshold != 0) {
179 		g_power_failure_counters.write_counter++;
180 	}
181 
182 	if (g_power_failure_thresholds.general_threshold != 0) {
183 		g_power_failure_counters.general_counter++;
184 	}
185 
186 	if ((g_power_failure_thresholds.write_threshold == 0 ||
187 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
188 	    (g_power_failure_thresholds.general_threshold == 0 ||
189 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
190 		offset = lba * dev->blocklen;
191 		length = lba_count * dev->blocklen;
192 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
193 
194 		memcpy(&g_dev_buffer[offset], payload, length);
195 		g_dev_write_bytes += length;
196 	} else {
197 		g_power_failure_rc = -EIO;
198 	}
199 
200 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
201 }
202 
/*
 * Assert that the iovec list covers exactly `length` bytes.
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier; kept as-is because callers in this file use the name.
 */
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < iovcnt; i++) {
		total += iov[i].iov_len;
	}

	CU_ASSERT(total == length);
}
214 
215 static void
216 dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
217 	  struct iovec *iov, int iovcnt,
218 	  uint64_t lba, uint32_t lba_count,
219 	  struct spdk_bs_dev_cb_args *cb_args)
220 {
221 	uint64_t offset, length;
222 	int i;
223 
224 	if (g_power_failure_thresholds.read_threshold != 0) {
225 		g_power_failure_counters.read_counter++;
226 	}
227 
228 	if (g_power_failure_thresholds.general_threshold != 0) {
229 		g_power_failure_counters.general_counter++;
230 	}
231 
232 	if ((g_power_failure_thresholds.read_threshold == 0 ||
233 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
234 	    (g_power_failure_thresholds.general_threshold == 0 ||
235 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
236 		offset = lba * dev->blocklen;
237 		length = lba_count * dev->blocklen;
238 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
239 		__check_iov(iov, iovcnt, length);
240 
241 		for (i = 0; i < iovcnt; i++) {
242 			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
243 			offset += iov[i].iov_len;
244 		}
245 
246 		g_dev_read_bytes += length;
247 	} else {
248 		g_power_failure_rc = -EIO;
249 	}
250 
251 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
252 }
253 
254 static void
255 dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
256 	   struct iovec *iov, int iovcnt,
257 	   uint64_t lba, uint32_t lba_count,
258 	   struct spdk_bs_dev_cb_args *cb_args)
259 {
260 	uint64_t offset, length;
261 	int i;
262 
263 	if (g_power_failure_thresholds.write_threshold != 0) {
264 		g_power_failure_counters.write_counter++;
265 	}
266 
267 	if (g_power_failure_thresholds.general_threshold != 0) {
268 		g_power_failure_counters.general_counter++;
269 	}
270 
271 	if ((g_power_failure_thresholds.write_threshold == 0 ||
272 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold)  &&
273 	    (g_power_failure_thresholds.general_threshold == 0 ||
274 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
275 		offset = lba * dev->blocklen;
276 		length = lba_count * dev->blocklen;
277 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
278 		__check_iov(iov, iovcnt, length);
279 
280 		for (i = 0; i < iovcnt; i++) {
281 			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
282 			offset += iov[i].iov_len;
283 		}
284 
285 		g_dev_write_bytes += length;
286 	} else {
287 		g_power_failure_rc = -EIO;
288 	}
289 
290 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
291 }
292 
293 static void
294 dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
295 	  struct spdk_bs_dev_cb_args *cb_args)
296 {
297 	if (g_power_failure_thresholds.flush_threshold != 0) {
298 		g_power_failure_counters.flush_counter++;
299 	}
300 
301 	if (g_power_failure_thresholds.general_threshold != 0) {
302 		g_power_failure_counters.general_counter++;
303 	}
304 
305 	if ((g_power_failure_thresholds.flush_threshold != 0 &&
306 	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold)  ||
307 	    (g_power_failure_thresholds.general_threshold != 0 &&
308 	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
309 		g_power_failure_rc = -EIO;
310 	}
311 
312 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
313 }
314 
315 static void
316 dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
317 	  uint64_t lba, uint32_t lba_count,
318 	  struct spdk_bs_dev_cb_args *cb_args)
319 {
320 	uint64_t offset, length;
321 
322 	if (g_power_failure_thresholds.unmap_threshold != 0) {
323 		g_power_failure_counters.unmap_counter++;
324 	}
325 
326 	if (g_power_failure_thresholds.general_threshold != 0) {
327 		g_power_failure_counters.general_counter++;
328 	}
329 
330 	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
331 	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold)  &&
332 	    (g_power_failure_thresholds.general_threshold == 0 ||
333 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
334 		offset = lba * dev->blocklen;
335 		length = lba_count * dev->blocklen;
336 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
337 		memset(&g_dev_buffer[offset], 0, length);
338 	} else {
339 		g_power_failure_rc = -EIO;
340 	}
341 
342 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
343 }
344 
345 static void
346 dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
347 		 uint64_t lba, uint32_t lba_count,
348 		 struct spdk_bs_dev_cb_args *cb_args)
349 {
350 	uint64_t offset, length;
351 
352 	if (g_power_failure_thresholds.write_zero_threshold != 0) {
353 		g_power_failure_counters.write_zero_counter++;
354 	}
355 
356 	if (g_power_failure_thresholds.general_threshold != 0) {
357 		g_power_failure_counters.general_counter++;
358 	}
359 
360 	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
361 	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold)  &&
362 	    (g_power_failure_thresholds.general_threshold == 0 ||
363 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
364 		offset = lba * dev->blocklen;
365 		length = lba_count * dev->blocklen;
366 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
367 		memset(&g_dev_buffer[offset], 0, length);
368 		g_dev_write_bytes += length;
369 	} else {
370 		g_power_failure_rc = -EIO;
371 	}
372 
373 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
374 }
375 
376 static struct spdk_bs_dev *
377 init_dev(void)
378 {
379 	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
380 
381 	SPDK_CU_ASSERT_FATAL(dev != NULL);
382 
383 	dev->create_channel = dev_create_channel;
384 	dev->destroy_channel = dev_destroy_channel;
385 	dev->destroy = dev_destroy;
386 	dev->read = dev_read;
387 	dev->write = dev_write;
388 	dev->readv = dev_readv;
389 	dev->writev = dev_writev;
390 	dev->flush = dev_flush;
391 	dev->unmap = dev_unmap;
392 	dev->write_zeroes = dev_write_zeroes;
393 	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
394 	dev->blocklen = DEV_BUFFER_BLOCKLEN;
395 
396 	return dev;
397 }
398