xref: /spdk/test/unit/lib/blob/bs_dev_common.c (revision 2dc4a231ac65d10dd2e1a96684094bef1b7ebb95)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "thread/thread_internal.h"
8 #include "bs_scheduler.c"
9 
10 
11 #define DEV_BUFFER_SIZE (64 * 1024 * 1024)
12 #define DEV_BUFFER_BLOCKLEN (4096)
13 #define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
14 #define DEV_MAX_PHYS_BLOCKLEN (16384)
15 #define FIRST_DATA_CLUSTER(bs) \
16 	((DEV_BUFFER_SIZE / spdk_bs_get_cluster_size(bs)) - spdk_bs_total_data_cluster_count(bs))
17 
18 uint8_t *g_dev_buffer;
19 uint64_t g_dev_write_bytes;
20 uint64_t g_dev_read_bytes;
21 uint64_t g_dev_copy_bytes;
22 bool g_dev_writev_ext_called;
23 bool g_dev_readv_ext_called;
24 bool g_dev_copy_enabled;
25 struct spdk_blob_ext_io_opts g_blob_ext_io_opts;
26 uint32_t g_phys_blocklen;
27 
/* Running counts of IO operations issued since the counters were last reset.
 * A counter is only incremented while its matching threshold is armed (non-zero);
 * see dev_read()/dev_write()/etc. below. */
struct spdk_power_failure_counters {
	uint64_t general_counter;     /* any IO operation */
	uint64_t read_counter;        /* read + readv */
	uint64_t write_counter;       /* write + writev */
	uint64_t unmap_counter;       /* unmap */
	uint64_t write_zero_counter;  /* write_zeroes */
	uint64_t flush_counter;       /* flush */
};
36 
37 static struct spdk_power_failure_counters g_power_failure_counters = {};
38 
/* Per-operation trip points for the simulated power failure.
 * 0 disables a threshold; any other value N makes the Nth counted
 * operation (and all later ones) fail with -EIO. */
struct spdk_power_failure_thresholds {
	uint64_t general_threshold;     /* trips on any IO operation */
	uint64_t read_threshold;        /* trips on read + readv */
	uint64_t write_threshold;       /* trips on write + writev */
	uint64_t unmap_threshold;       /* trips on unmap */
	uint64_t write_zero_threshold;  /* trips on write_zeroes */
	uint64_t flush_threshold;       /* trips on flush */
};
47 
48 static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};
49 
50 static uint64_t g_power_failure_rc;
51 
52 void dev_reset_power_failure_event(void);
53 void dev_reset_power_failure_counters(void);
54 void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);
55 
56 void
57 dev_reset_power_failure_event(void)
58 {
59 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
60 	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
61 	g_power_failure_rc = 0;
62 }
63 
64 void
65 dev_reset_power_failure_counters(void)
66 {
67 	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
68 	g_power_failure_rc = 0;
69 }
70 
71 /**
72  * Set power failure event. Power failure will occur after given number
73  * of IO operations. It may occur after number of particular operations
74  * (read, write, unmap, write zero or flush) or after given number of
75  * any IO operations (general_threshold). Value 0 means that the threshold
76  * is disabled. Any other value is the number of operation starting from
77  * which power failure event will happen.
78  */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	/* Struct copy by value; counters are intentionally left untouched so a
	 * caller can re-arm thresholds mid-test without restarting the counts. */
	g_power_failure_thresholds = thresholds;
}
84 
85 /* Define here for UT only. */
86 struct spdk_io_channel g_io_channel;
87 
/* UT stub: every caller shares the single static g_io_channel; nothing is
 * allocated, so dev_destroy_channel() has nothing to release. */
static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}
93 
/* No-op: channels handed out by dev_create_channel() are static (see above). */
static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}
98 
/* Release the bs_dev allocated by init_dev(). The backing data buffer
 * (g_dev_buffer) is owned by the test harness, not by the device. */
static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}
104 
105 
/* Final completion step: invoke the blobstore's callback with the global
 * result code (0, or -EIO once a power failure threshold has tripped). */
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}
113 
/* Bounce the completion through the UT scheduler (bs_scheduler.c) so the
 * callback fires asynchronously, as a real bs_dev would deliver it. */
static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}
119 
120 static void
121 dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
122 	 uint64_t lba, uint32_t lba_count,
123 	 struct spdk_bs_dev_cb_args *cb_args)
124 {
125 	uint64_t offset, length;
126 
127 	if (g_power_failure_thresholds.read_threshold != 0) {
128 		g_power_failure_counters.read_counter++;
129 	}
130 
131 	if (g_power_failure_thresholds.general_threshold != 0) {
132 		g_power_failure_counters.general_counter++;
133 	}
134 
135 	if ((g_power_failure_thresholds.read_threshold == 0 ||
136 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
137 	    (g_power_failure_thresholds.general_threshold == 0 ||
138 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
139 		offset = lba * dev->blocklen;
140 		length = lba_count * dev->blocklen;
141 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
142 
143 		if (length > 0) {
144 			memcpy(payload, &g_dev_buffer[offset], length);
145 			g_dev_read_bytes += length;
146 		}
147 	} else {
148 		g_power_failure_rc = -EIO;
149 	}
150 
151 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
152 }
153 
154 static void
155 dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
156 	  uint64_t lba, uint32_t lba_count,
157 	  struct spdk_bs_dev_cb_args *cb_args)
158 {
159 	uint64_t offset, length;
160 
161 	if (g_power_failure_thresholds.write_threshold != 0) {
162 		g_power_failure_counters.write_counter++;
163 	}
164 
165 	if (g_power_failure_thresholds.general_threshold != 0) {
166 		g_power_failure_counters.general_counter++;
167 	}
168 
169 	if ((g_power_failure_thresholds.write_threshold == 0 ||
170 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
171 	    (g_power_failure_thresholds.general_threshold == 0 ||
172 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
173 		offset = lba * dev->blocklen;
174 		length = lba_count * dev->blocklen;
175 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
176 
177 		memcpy(&g_dev_buffer[offset], payload, length);
178 		g_dev_write_bytes += length;
179 	} else {
180 		g_power_failure_rc = -EIO;
181 	}
182 
183 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
184 }
185 
/* Assert that the iovec entries together describe exactly `length` bytes. */
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < iovcnt; i++) {
		total += iov[i].iov_len;
	}

	CU_ASSERT(total == length);
}
197 
198 static void
199 dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
200 	  struct iovec *iov, int iovcnt,
201 	  uint64_t lba, uint32_t lba_count,
202 	  struct spdk_bs_dev_cb_args *cb_args)
203 {
204 	uint64_t offset, length;
205 	int i;
206 
207 	if (g_power_failure_thresholds.read_threshold != 0) {
208 		g_power_failure_counters.read_counter++;
209 	}
210 
211 	if (g_power_failure_thresholds.general_threshold != 0) {
212 		g_power_failure_counters.general_counter++;
213 	}
214 
215 	if ((g_power_failure_thresholds.read_threshold == 0 ||
216 	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
217 	    (g_power_failure_thresholds.general_threshold == 0 ||
218 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
219 		offset = lba * dev->blocklen;
220 		length = lba_count * dev->blocklen;
221 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
222 		__check_iov(iov, iovcnt, length);
223 
224 		for (i = 0; i < iovcnt; i++) {
225 			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
226 			offset += iov[i].iov_len;
227 		}
228 
229 		g_dev_read_bytes += length;
230 	} else {
231 		g_power_failure_rc = -EIO;
232 	}
233 
234 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
235 }
236 
/* Extended readv: record that the ext entry point was used and capture the
 * io_opts for test inspection, then delegate to the plain dev_readv() path. */
static void
dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	      struct iovec *iov, int iovcnt,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args,
	      struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_readv_ext_called = true;
	/* Shallow struct copy — tests inspect the fields, not pointed-to data. */
	g_blob_ext_io_opts = *io_opts;
	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}
248 
249 static void
250 dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
251 	   struct iovec *iov, int iovcnt,
252 	   uint64_t lba, uint32_t lba_count,
253 	   struct spdk_bs_dev_cb_args *cb_args)
254 {
255 	uint64_t offset, length;
256 	int i;
257 
258 	if (g_power_failure_thresholds.write_threshold != 0) {
259 		g_power_failure_counters.write_counter++;
260 	}
261 
262 	if (g_power_failure_thresholds.general_threshold != 0) {
263 		g_power_failure_counters.general_counter++;
264 	}
265 
266 	if ((g_power_failure_thresholds.write_threshold == 0 ||
267 	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold)  &&
268 	    (g_power_failure_thresholds.general_threshold == 0 ||
269 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
270 		offset = lba * dev->blocklen;
271 		length = lba_count * dev->blocklen;
272 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
273 		__check_iov(iov, iovcnt, length);
274 
275 		for (i = 0; i < iovcnt; i++) {
276 			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
277 			offset += iov[i].iov_len;
278 		}
279 
280 		g_dev_write_bytes += length;
281 	} else {
282 		g_power_failure_rc = -EIO;
283 	}
284 
285 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
286 }
287 
/* Extended writev: record that the ext entry point was used and capture the
 * io_opts for test inspection, then delegate to the plain dev_writev() path. */
static void
dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt,
	       uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args,
	       struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_writev_ext_called = true;
	/* Shallow struct copy — tests inspect the fields, not pointed-to data. */
	g_blob_ext_io_opts = *io_opts;
	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}
299 
300 static void
301 dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
302 	  struct spdk_bs_dev_cb_args *cb_args)
303 {
304 	if (g_power_failure_thresholds.flush_threshold != 0) {
305 		g_power_failure_counters.flush_counter++;
306 	}
307 
308 	if (g_power_failure_thresholds.general_threshold != 0) {
309 		g_power_failure_counters.general_counter++;
310 	}
311 
312 	if ((g_power_failure_thresholds.flush_threshold != 0 &&
313 	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold)  ||
314 	    (g_power_failure_thresholds.general_threshold != 0 &&
315 	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
316 		g_power_failure_rc = -EIO;
317 	}
318 
319 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
320 }
321 
322 static void
323 dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
324 	  uint64_t lba, uint64_t lba_count,
325 	  struct spdk_bs_dev_cb_args *cb_args)
326 {
327 	uint64_t offset, length;
328 
329 	if (g_power_failure_thresholds.unmap_threshold != 0) {
330 		g_power_failure_counters.unmap_counter++;
331 	}
332 
333 	if (g_power_failure_thresholds.general_threshold != 0) {
334 		g_power_failure_counters.general_counter++;
335 	}
336 
337 	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
338 	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold)  &&
339 	    (g_power_failure_thresholds.general_threshold == 0 ||
340 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
341 		offset = lba * dev->blocklen;
342 		length = lba_count * dev->blocklen;
343 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
344 		memset(&g_dev_buffer[offset], 0, length);
345 	} else {
346 		g_power_failure_rc = -EIO;
347 	}
348 
349 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
350 }
351 
352 static void
353 dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
354 		 uint64_t lba, uint64_t lba_count,
355 		 struct spdk_bs_dev_cb_args *cb_args)
356 {
357 	uint64_t offset, length;
358 
359 	if (g_power_failure_thresholds.write_zero_threshold != 0) {
360 		g_power_failure_counters.write_zero_counter++;
361 	}
362 
363 	if (g_power_failure_thresholds.general_threshold != 0) {
364 		g_power_failure_counters.general_counter++;
365 	}
366 
367 	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
368 	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold)  &&
369 	    (g_power_failure_thresholds.general_threshold == 0 ||
370 	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
371 		offset = lba * dev->blocklen;
372 		length = lba_count * dev->blocklen;
373 		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
374 		memset(&g_dev_buffer[offset], 0, length);
375 		g_dev_write_bytes += length;
376 	} else {
377 		g_power_failure_rc = -EIO;
378 	}
379 
380 	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
381 }
382 
/* Identity LBA translation: the UT device is not layered over another
 * device, so every LBA maps to itself and translation always succeeds. */
static bool
dev_translate_lba(struct spdk_bs_dev *dev, uint64_t lba, uint64_t *base_lba)
{
	*base_lba = lba;
	return true;
}
389 
390 static void
391 dev_copy(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t dst_lba,
392 	 uint64_t src_lba, uint64_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
393 {
394 	void *dst = &g_dev_buffer[dst_lba * dev->blocklen];
395 	const void *src = &g_dev_buffer[src_lba * dev->blocklen];
396 	uint64_t size = lba_count * dev->blocklen;
397 
398 	memcpy(dst, src, size);
399 	g_dev_copy_bytes += size;
400 
401 	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
402 }
403 
404 static struct spdk_bs_dev *
405 init_dev(void)
406 {
407 	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
408 
409 	SPDK_CU_ASSERT_FATAL(dev != NULL);
410 
411 	dev->create_channel = dev_create_channel;
412 	dev->destroy_channel = dev_destroy_channel;
413 	dev->destroy = dev_destroy;
414 	dev->read = dev_read;
415 	dev->write = dev_write;
416 	dev->readv = dev_readv;
417 	dev->writev = dev_writev;
418 	dev->readv_ext = dev_readv_ext;
419 	dev->writev_ext = dev_writev_ext;
420 	dev->flush = dev_flush;
421 	dev->unmap = dev_unmap;
422 	dev->write_zeroes = dev_write_zeroes;
423 	dev->translate_lba = dev_translate_lba;
424 	dev->copy = g_dev_copy_enabled ? dev_copy : NULL;
425 	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
426 	dev->blocklen = DEV_BUFFER_BLOCKLEN;
427 	dev->phys_blocklen = g_phys_blocklen;
428 
429 	return dev;
430 }
431