xref: /spdk/test/unit/lib/blob/bs_dev_common.c (revision b30d57cdad6d2bc75cc1e4e2ebbcebcb0d98dcfa)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/thread.h"
35 #include "bs_scheduler.c"
36 
37 
/* Geometry of the in-memory buffer that backs the fake block device:
 * 64 MiB split into 4 KiB blocks. */
#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
/* Backing storage for the fake device; allocated elsewhere by the tests
 * (not in this file). */
uint8_t *g_dev_buffer;
/* Running totals of bytes moved through the fake device, updated by the
 * read/write/write_zeroes paths below so tests can verify IO volume. */
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
44 
/* Per-operation counts of IOs issued since the last reset. Each IO path
 * below increments its own counter (and general_counter) only while the
 * matching threshold is armed; the counts are compared against
 * g_power_failure_thresholds to decide when the simulated power failure
 * kicks in. */
struct spdk_power_failure_counters {
	uint64_t general_counter;	/* any IO operation */
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};
55 
/* Per-operation IO-count thresholds at which the simulated power failure
 * triggers. A value of 0 disables that threshold; see
 * dev_set_power_failure_thresholds() for the full semantics. */
struct spdk_power_failure_thresholds {
	uint64_t general_threshold;	/* any IO operation */
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};
66 
/* Completion status passed to blobstore callbacks (see dev_complete_cb).
 * Set to -EIO once any armed threshold is reached and stays set until one
 * of the reset functions below clears it. */
static uint64_t g_power_failure_rc;

/* Public test hooks; prototyped here instead of a separate header. */
void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
72 
73 void
74 dev_reset_power_failure_event(void)
75 {
76 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
77 	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
78 	g_power_failure_rc = 0;
79 }
80 
81 void
82 dev_reset_power_failure_counters(void)
83 {
84 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
85 	g_power_failure_rc = 0;
86 }
87 
/**
 * Set power failure event. Power failure will occur after a given number
 * of IO operations. It may occur after a number of particular operations
 * (read, write, unmap, write zero or flush) or after a given number of
 * any IO operations (general_threshold). Value 0 means that the threshold
 * is disabled. Any other value is the number of the operation starting
 * from which the power failure event will happen.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
101 
/* Define here for UT only. A single static channel is shared by every
 * dev_create_channel() call; no per-channel state is needed. */
struct spdk_io_channel g_io_channel;
104 
/* Hand out the shared static channel; the fake device keeps no
 * per-channel state. */
static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}
110 
/* Nothing to release: every channel is the shared static g_io_channel. */
static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}
115 
/* Free the device struct allocated by init_dev(). The backing buffer
 * (g_dev_buffer) is owned by the tests and is not touched here. */
static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
121 
122 
/* Invoke the caller's completion callback with the simulated status:
 * g_power_failure_rc is 0 on success, -EIO once a power failure has
 * triggered. */
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}
130 
/* Route the completion through the test scheduler (_bs_send_msg from the
 * included bs_scheduler.c) so callbacks fire asynchronously, like IO
 * completions from a real device. */
static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}
136 
137 static void
138 dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
139 	 uint64_t lba, uint32_t lba_count,
140 	 struct spdk_bs_dev_cb_args *cb_args)
141 {
142 	uint64_t offset, length;
143 
144 	if (g_power_failure_thresholds.read_threshold != 0) {
145 		g_power_failure_counters.read_counter++;
146 	}
147 
148 	if (g_power_failure_thresholds.general_threshold != 0) {
149 		g_power_failure_counters.general_counter++;
150 	}
151 
152 	if ((g_power_failure_thresholds.read_threshold == 0 ||
153 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
154 	    (g_power_failure_thresholds.general_threshold == 0 ||
155 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
156 		offset = lba * dev->blocklen;
157 		length = lba_count * dev->blocklen;
158 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
159 
160 		memcpy(payload, &g_dev_buffer[offset], length);
161 		g_dev_read_bytes += length;
162 	} else {
163 		g_power_failure_rc = -EIO;
164 	}
165 
166 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
167 }
168 
169 static void
170 dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
171 	  uint64_t lba, uint32_t lba_count,
172 	  struct spdk_bs_dev_cb_args *cb_args)
173 {
174 	uint64_t offset, length;
175 
176 	if (g_power_failure_thresholds.write_threshold != 0) {
177 		g_power_failure_counters.write_counter++;
178 	}
179 
180 	if (g_power_failure_thresholds.general_threshold != 0) {
181 		g_power_failure_counters.general_counter++;
182 	}
183 
184 	if ((g_power_failure_thresholds.write_threshold == 0 ||
185 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
186 	    (g_power_failure_thresholds.general_threshold == 0 ||
187 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
188 		offset = lba * dev->blocklen;
189 		length = lba_count * dev->blocklen;
190 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
191 
192 		memcpy(&g_dev_buffer[offset], payload, length);
193 		g_dev_write_bytes += length;
194 	} else {
195 		g_power_failure_rc = -EIO;
196 	}
197 
198 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
199 }
200 
/* Assert that the iovec entries cover exactly `length` bytes in total. */
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	uint64_t total = 0;
	int idx;

	for (idx = 0; idx < iovcnt; idx++) {
		total += iov[idx].iov_len;
	}

	CU_ASSERT(total == length);
}
212 
213 static void
214 dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
215 	  struct iovec *iov, int iovcnt,
216 	  uint64_t lba, uint32_t lba_count,
217 	  struct spdk_bs_dev_cb_args *cb_args)
218 {
219 	uint64_t offset, length;
220 	int i;
221 
222 	if (g_power_failure_thresholds.read_threshold != 0) {
223 		g_power_failure_counters.read_counter++;
224 	}
225 
226 	if (g_power_failure_thresholds.general_threshold != 0) {
227 		g_power_failure_counters.general_counter++;
228 	}
229 
230 	if ((g_power_failure_thresholds.read_threshold == 0 ||
231 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
232 	    (g_power_failure_thresholds.general_threshold == 0 ||
233 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
234 		offset = lba * dev->blocklen;
235 		length = lba_count * dev->blocklen;
236 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
237 		__check_iov(iov, iovcnt, length);
238 
239 		for (i = 0; i < iovcnt; i++) {
240 			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
241 			offset += iov[i].iov_len;
242 		}
243 
244 		g_dev_read_bytes += length;
245 	} else {
246 		g_power_failure_rc = -EIO;
247 	}
248 
249 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
250 }
251 
252 static void
253 dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
254 	   struct iovec *iov, int iovcnt,
255 	   uint64_t lba, uint32_t lba_count,
256 	   struct spdk_bs_dev_cb_args *cb_args)
257 {
258 	uint64_t offset, length;
259 	int i;
260 
261 	if (g_power_failure_thresholds.write_threshold != 0) {
262 		g_power_failure_counters.write_counter++;
263 	}
264 
265 	if (g_power_failure_thresholds.general_threshold != 0) {
266 		g_power_failure_counters.general_counter++;
267 	}
268 
269 	if ((g_power_failure_thresholds.write_threshold == 0 ||
270 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold)  &&
271 	    (g_power_failure_thresholds.general_threshold == 0 ||
272 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
273 		offset = lba * dev->blocklen;
274 		length = lba_count * dev->blocklen;
275 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
276 		__check_iov(iov, iovcnt, length);
277 
278 		for (i = 0; i < iovcnt; i++) {
279 			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
280 			offset += iov[i].iov_len;
281 		}
282 
283 		g_dev_write_bytes += length;
284 	} else {
285 		g_power_failure_rc = -EIO;
286 	}
287 
288 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
289 }
290 
291 static void
292 dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
293 	  struct spdk_bs_dev_cb_args *cb_args)
294 {
295 	if (g_power_failure_thresholds.flush_threshold != 0) {
296 		g_power_failure_counters.flush_counter++;
297 	}
298 
299 	if (g_power_failure_thresholds.general_threshold != 0) {
300 		g_power_failure_counters.general_counter++;
301 	}
302 
303 	if ((g_power_failure_thresholds.flush_threshold != 0 &&
304 	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold)  ||
305 	    (g_power_failure_thresholds.general_threshold != 0 &&
306 	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
307 		g_power_failure_rc = -EIO;
308 	}
309 
310 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
311 }
312 
313 static void
314 dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
315 	  uint64_t lba, uint32_t lba_count,
316 	  struct spdk_bs_dev_cb_args *cb_args)
317 {
318 	uint64_t offset, length;
319 
320 	if (g_power_failure_thresholds.unmap_threshold != 0) {
321 		g_power_failure_counters.unmap_counter++;
322 	}
323 
324 	if (g_power_failure_thresholds.general_threshold != 0) {
325 		g_power_failure_counters.general_counter++;
326 	}
327 
328 	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
329 	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold)  &&
330 	    (g_power_failure_thresholds.general_threshold == 0 ||
331 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
332 		offset = lba * dev->blocklen;
333 		length = lba_count * dev->blocklen;
334 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
335 		memset(&g_dev_buffer[offset], 0, length);
336 	} else {
337 		g_power_failure_rc = -EIO;
338 	}
339 
340 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
341 }
342 
343 static void
344 dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
345 		 uint64_t lba, uint32_t lba_count,
346 		 struct spdk_bs_dev_cb_args *cb_args)
347 {
348 	uint64_t offset, length;
349 
350 	if (g_power_failure_thresholds.write_zero_threshold != 0) {
351 		g_power_failure_counters.write_zero_counter++;
352 	}
353 
354 	if (g_power_failure_thresholds.general_threshold != 0) {
355 		g_power_failure_counters.general_counter++;
356 	}
357 
358 	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
359 	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold)  &&
360 	    (g_power_failure_thresholds.general_threshold == 0 ||
361 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
362 		offset = lba * dev->blocklen;
363 		length = lba_count * dev->blocklen;
364 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
365 		memset(&g_dev_buffer[offset], 0, length);
366 		g_dev_write_bytes += length;
367 	} else {
368 		g_power_failure_rc = -EIO;
369 	}
370 
371 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
372 }
373 
374 static struct spdk_bs_dev *
375 init_dev(void)
376 {
377 	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
378 
379 	SPDK_CU_ASSERT_FATAL(dev != NULL);
380 
381 	dev->create_channel = dev_create_channel;
382 	dev->destroy_channel = dev_destroy_channel;
383 	dev->destroy = dev_destroy;
384 	dev->read = dev_read;
385 	dev->write = dev_write;
386 	dev->readv = dev_readv;
387 	dev->writev = dev_writev;
388 	dev->flush = dev_flush;
389 	dev->unmap = dev_unmap;
390 	dev->write_zeroes = dev_write_zeroes;
391 	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
392 	dev->blocklen = DEV_BUFFER_BLOCKLEN;
393 
394 	return dev;
395 }
396