/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "thread/thread_internal.h"
#include "bs_scheduler.c"


#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
uint8_t *g_dev_buffer;
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
uint64_t g_dev_copy_bytes;
bool g_dev_writev_ext_called;
bool g_dev_readv_ext_called;
bool g_dev_copy_enabled;
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;

struct spdk_power_failure_counters {
	uint64_t general_counter;
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};

struct spdk_power_failure_thresholds {
	uint64_t general_threshold;
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};

static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);

void
dev_reset_power_failure_event(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
	g_power_failure_rc = 0;
}

void
dev_reset_power_failure_counters(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	g_power_failure_rc = 0;
}

/**
 * Set a power failure event. The power failure will occur after a given
 * number of I/O operations: either after a given number of operations of a
 * particular type (read, write, unmap, write zeroes or flush) or after a
 * given number of I/O operations of any type (general_threshold). A value
 * of 0 disables a threshold; any other value is the ordinal of the
 * operation at which the power failure event triggers.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
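
/*
 * Minimal usage sketch (hypothetical test code, not part of this file):
 * make the second write to the dev fail. Note that once a threshold is
 * crossed, g_power_failure_rc stays at -EIO, so every subsequent completion
 * also reports the error until dev_reset_power_failure_event() is called.
 *
 *	struct spdk_power_failure_thresholds thresholds = {};
 *
 *	thresholds.write_threshold = 2;
 *	dev_set_power_failure_thresholds(thresholds);
 *	... run blobstore I/O; the second dev_write() completes with -EIO ...
 *	dev_reset_power_failure_event();
 */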

/* Defined here for UT only. */
struct spdk_io_channel g_io_channel;

static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}

static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
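
/*
 * Completions are deferred through a thread message and the UT scheduler
 * (see bs_scheduler.c) to mimic asynchronous bdev behavior. The value of
 * g_power_failure_rc is reported to every callback; once a threshold trips
 * it to -EIO it stays there until dev_reset_power_failure_event() clears it.
 */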
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}

static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}

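/*
 * Each I/O callback below follows the same pattern: bump the per-type and
 * general counters for any armed thresholds, perform the operation against
 * g_dev_buffer while all counters remain below their thresholds, and fail
 * with -EIO once a threshold has been reached.
 */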
static void
dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	 uint64_t lba, uint32_t lba_count,
	 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		if (length > 0) {
			memcpy(payload, &g_dev_buffer[offset], length);
			g_dev_read_bytes += length;
		}
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		memcpy(&g_dev_buffer[offset], payload, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

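/* Assert that the iovec lengths sum exactly to the requested transfer length. */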
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		length -= iov[i].iov_len;
	}

	CU_ASSERT(length == 0);
}

static void
dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct iovec *iov, int iovcnt,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_read_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	      struct iovec *iov, int iovcnt,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args,
	      struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_readv_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	   struct iovec *iov, int iovcnt,
	   uint64_t lba, uint32_t lba_count,
	   struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt,
	       uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args,
	       struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_writev_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

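/*
 * The dev is backed by plain host memory, so there is nothing to flush;
 * only the power failure accounting is performed here.
 */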
static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	if (g_power_failure_thresholds.flush_threshold != 0) {
		g_power_failure_counters.flush_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.flush_threshold != 0 &&
	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
	    (g_power_failure_thresholds.general_threshold != 0 &&
	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

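/* Unmap is simulated by zeroing the affected range of the backing buffer. */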
static void
dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  uint64_t lba, uint64_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.unmap_threshold != 0) {
		g_power_failure_counters.unmap_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

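/* Like unmap, but the zeroed bytes also count toward g_dev_write_bytes. */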
static void
dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
		 uint64_t lba, uint64_t lba_count,
		 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_zero_threshold != 0) {
		g_power_failure_counters.write_zero_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

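/* No translation for the UT dev: the base LBA is identical to the blob LBA. */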
static bool
dev_translate_lba(struct spdk_bs_dev *dev, uint64_t lba, uint64_t *base_lba)
{
	*base_lba = lba;
	return true;
}

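/*
 * Copy is not affected by the power failure thresholds and, unlike the
 * other callbacks, completes synchronously in the caller's context.
 */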
static void
dev_copy(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t dst_lba,
	 uint64_t src_lba, uint64_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
	void *dst = &g_dev_buffer[dst_lba * dev->blocklen];
	const void *src = &g_dev_buffer[src_lba * dev->blocklen];
	uint64_t size = lba_count * dev->blocklen;

	/* Mirror the bounds checking done by the other I/O callbacks. */
	SPDK_CU_ASSERT_FATAL(dst_lba * dev->blocklen + size <= DEV_BUFFER_SIZE);
	SPDK_CU_ASSERT_FATAL(src_lba * dev->blocklen + size <= DEV_BUFFER_SIZE);

	memcpy(dst, src, size);
	g_dev_copy_bytes += size;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}

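/*
 * Build the UT block device backed by g_dev_buffer. The copy callback is
 * registered only when a test opts in via g_dev_copy_enabled.
 */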
static struct spdk_bs_dev *
init_dev(void)
{
	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));

	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->create_channel = dev_create_channel;
	dev->destroy_channel = dev_destroy_channel;
	dev->destroy = dev_destroy;
	dev->read = dev_read;
	dev->write = dev_write;
	dev->readv = dev_readv;
	dev->writev = dev_writev;
	dev->readv_ext = dev_readv_ext;
	dev->writev_ext = dev_writev_ext;
	dev->flush = dev_flush;
	dev->unmap = dev_unmap;
	dev->write_zeroes = dev_write_zeroes;
	dev->translate_lba = dev_translate_lba;
	dev->copy = g_dev_copy_enabled ? dev_copy : NULL;
	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
	dev->blocklen = DEV_BUFFER_BLOCKLEN;

	return dev;
}