/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "vbdev_delay.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"


static int vbdev_delay_init(void);
static int vbdev_delay_get_ctx_size(void);
static void vbdev_delay_examine(struct spdk_bdev *bdev);
static void vbdev_delay_finish(void);
static int vbdev_delay_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module delay_if = {
	.name = "delay",
	.module_init = vbdev_delay_init,
	.get_ctx_size = vbdev_delay_get_ctx_size,
	.examine_config = vbdev_delay_examine,
	.module_fini = vbdev_delay_finish,
	.config_json = vbdev_delay_config_json
};

SPDK_BDEV_MODULE_REGISTER(delay, &delay_if)

/* Associative list to be used in examine */
struct bdev_association {
	char			*vbdev_name;
	char			*bdev_name;
	uint64_t		avg_read_latency;
	uint64_t		p99_read_latency;
	uint64_t		avg_write_latency;
	uint64_t		p99_write_latency;
	TAILQ_ENTRY(bdev_association)	link;
};
static TAILQ_HEAD(, bdev_association) g_bdev_associations = TAILQ_HEAD_INITIALIZER(
			g_bdev_associations);
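
/* Note: associations outlive their vbdevs. An entry stays on this list until
 * delete_delay_disk() removes it, which lets the examine path re-create the
 * delay vbdev if the base bdev re-appears after a hot remove.
 */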

/* List of virtual bdevs and associated info for each. */
struct vbdev_delay {
	struct spdk_bdev		*base_bdev; /* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc; /* its descriptor we get from open */
	struct spdk_bdev		delay_bdev;    /* the delay virtual bdev */
	uint64_t			average_read_latency_ticks; /* the average read delay */
	uint64_t			p99_read_latency_ticks; /* the p99 read delay */
	uint64_t			average_write_latency_ticks; /* the average write delay */
	uint64_t			p99_write_latency_ticks; /* the p99 write delay */
	TAILQ_ENTRY(vbdev_delay)	link;
	struct spdk_thread		*thread;    /* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_delay) g_delay_nodes = TAILQ_HEAD_INITIALIZER(g_delay_nodes);

struct delay_bdev_io {
	int status;

	uint64_t completion_tick;

	enum delay_io_type type;

	struct spdk_io_channel *ch;

	struct spdk_bdev_io_wait_entry bdev_io_wait;

	struct spdk_bdev_io *zcopy_bdev_io;

	STAILQ_ENTRY(delay_bdev_io) link;
};
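
/* delay_bdev_io lives in the driver_ctx scratch area of each spdk_bdev_io; the
 * bdev layer sizes that area using the value returned by
 * vbdev_delay_get_ctx_size(), so no separate per-I/O allocation is needed.
 */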

struct delay_io_channel {
	struct spdk_io_channel	*base_ch; /* IO channel of base device */
	STAILQ_HEAD(, delay_bdev_io) avg_read_io;
	STAILQ_HEAD(, delay_bdev_io) p99_read_io;
	STAILQ_HEAD(, delay_bdev_io) avg_write_io;
	STAILQ_HEAD(, delay_bdev_io) p99_write_io;
	struct spdk_poller *io_poller;
	unsigned int rand_seed;
};
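
/* Each of the four queues holds I/O waiting for one fixed latency value, so
 * completion ticks are non-decreasing within a queue (as long as the latency
 * is not changed at runtime) and the poller can stop scanning a queue at the
 * first entry that is not yet due.
 */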

static void vbdev_delay_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_delay *delay_node = io_device;

	/* Done with this delay_node. */
	free(delay_node->delay_bdev.name);
	free(delay_node);
}

static void
_vbdev_delay_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

static int
vbdev_delay_destruct(void *ctx)
{
	struct vbdev_delay *delay_node = (struct vbdev_delay *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_delay_nodes, delay_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(delay_node->base_bdev);

	/* Close the underlying bdev on the same thread it was opened from. */
	if (delay_node->thread && delay_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(delay_node->thread, _vbdev_delay_destruct, delay_node->base_desc);
	} else {
		spdk_bdev_close(delay_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(delay_node, _device_unregister_cb);

	return 0;
}

static int
_process_io_stailq(void *arg, uint64_t ticks)
{
	STAILQ_HEAD(, delay_bdev_io) *head = arg;
	struct delay_bdev_io *io_ctx, *tmp;
	int completions = 0;

	STAILQ_FOREACH_SAFE(io_ctx, head, link, tmp) {
		if (io_ctx->completion_tick <= ticks) {
			STAILQ_REMOVE(head, io_ctx, delay_bdev_io, link);
			spdk_bdev_io_complete(spdk_bdev_io_from_ctx(io_ctx), io_ctx->status);
			completions++;
		} else {
			/* In the general case, I/O will become ready in FIFO order. When timeouts are dynamically
			 * changed, this is not necessarily the case. However, the normal behavior will be restored
			 * after the outstanding I/O at the time of the change have been completed.
			 * This essentially means that moving from a high to a low latency creates a dam for the new I/O
			 * submitted after the latency change. This is considered desirable behavior for the use case where
			 * we are trying to trigger a pre-defined timeout on an initiator.
			 */
			break;
		}
	}

	return completions;
}
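
/* For example (illustrative numbers), if avg_write_latency is lowered from 100us
 * to 10us while a write queued under the old value is still pending, writes
 * submitted under the new value sit behind it and are only released once the
 * 100us entry expires.
 */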

static int
_delay_finish_io(void *arg)
{
	struct delay_io_channel *delay_ch = arg;
	uint64_t ticks = spdk_get_ticks();
	int completions = 0;

	completions += _process_io_stailq(&delay_ch->avg_read_io, ticks);
	completions += _process_io_stailq(&delay_ch->avg_write_io, ticks);
	completions += _process_io_stailq(&delay_ch->p99_read_io, ticks);
	completions += _process_io_stailq(&delay_ch->p99_write_io, ticks);

	return completions == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
}

/* Completion callback for I/O that was issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_delay_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	struct vbdev_delay *delay_node = SPDK_CONTAINEROF(orig_io->bdev, struct vbdev_delay, delay_bdev);
	struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)orig_io->driver_ctx;
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(io_ctx->ch);

	io_ctx->status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY && bdev_io->u.bdev.zcopy.start && success) {
		io_ctx->zcopy_bdev_io = bdev_io;
	} else {
		assert(io_ctx->zcopy_bdev_io == NULL || io_ctx->zcopy_bdev_io == bdev_io);
		io_ctx->zcopy_bdev_io = NULL;
		spdk_bdev_free_io(bdev_io);
	}

	/* Put the I/O into the proper list for processing by the channel poller. */
	switch (io_ctx->type) {
	case DELAY_AVG_READ:
		io_ctx->completion_tick = spdk_get_ticks() + delay_node->average_read_latency_ticks;
		STAILQ_INSERT_TAIL(&delay_ch->avg_read_io, io_ctx, link);
		break;
	case DELAY_AVG_WRITE:
		io_ctx->completion_tick = spdk_get_ticks() + delay_node->average_write_latency_ticks;
		STAILQ_INSERT_TAIL(&delay_ch->avg_write_io, io_ctx, link);
		break;
	case DELAY_P99_READ:
		io_ctx->completion_tick = spdk_get_ticks() + delay_node->p99_read_latency_ticks;
		STAILQ_INSERT_TAIL(&delay_ch->p99_read_io, io_ctx, link);
		break;
	case DELAY_P99_WRITE:
		io_ctx->completion_tick = spdk_get_ticks() + delay_node->p99_write_latency_ticks;
		STAILQ_INSERT_TAIL(&delay_ch->p99_write_io, io_ctx, link);
		break;
	case DELAY_NONE:
	default:
		spdk_bdev_io_complete(orig_io, io_ctx->status);
		break;
	}
}
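
/* completion_tick is computed when the base bdev completes the I/O, so the
 * configured latency is added on top of whatever time the base device itself
 * took: the delay clock starts at base completion, not at submission.
 */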

static void
vbdev_delay_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx;

	vbdev_delay_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_delay_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx;
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_delay_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, delay_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_delay_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
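
/* This is the standard bdev -ENOMEM retry pattern: when a submission to the
 * base bdev fails for lack of spdk_bdev_io resources, the wait entry registered
 * here has its cb_fn invoked once resources free up on the base channel, and the
 * request is then resubmitted on the same thread via vbdev_delay_resubmit_io().
 */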

static void
delay_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay,
					 delay_bdev);
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch);
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	rc = spdk_bdev_readv_blocks(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs,
				    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
				    bdev_io->u.bdev.num_blocks, _delay_complete_io,
				    bdev_io);

	if (rc == -ENOMEM) {
		SPDK_ERRLOG("No memory, start to queue io for delay.\n");
		vbdev_delay_queue_io(bdev_io);
	} else if (rc != 0) {
		SPDK_ERRLOG("ERROR on bdev_io submission!\n");
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
vbdev_delay_reset_dev(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_bdev_io *bdev_io = spdk_io_channel_iter_get_ctx(i);
	struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx;
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	struct vbdev_delay *delay_node = spdk_io_channel_iter_get_io_device(i);
	int rc;

	rc = spdk_bdev_reset(delay_node->base_desc, delay_ch->base_ch,
			     _delay_complete_io, bdev_io);

	if (rc == -ENOMEM) {
		SPDK_ERRLOG("No memory, start to queue io for delay.\n");
		vbdev_delay_queue_io(bdev_io);
	} else if (rc != 0) {
		SPDK_ERRLOG("ERROR on bdev_io submission!\n");
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
abort_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	spdk_bdev_free_io(bdev_io);
}

static void
_abort_all_delayed_io(void *arg)
{
	STAILQ_HEAD(, delay_bdev_io) *head = arg;
	struct delay_bdev_io *io_ctx, *tmp;

	STAILQ_FOREACH_SAFE(io_ctx, head, link, tmp) {
		STAILQ_REMOVE(head, io_ctx, delay_bdev_io, link);
		if (io_ctx->zcopy_bdev_io != NULL) {
			spdk_bdev_zcopy_end(io_ctx->zcopy_bdev_io, false, abort_zcopy_io, NULL);
		}
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(io_ctx), SPDK_BDEV_IO_STATUS_ABORTED);
	}
}

static void
vbdev_delay_reset_channel(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch);

	_abort_all_delayed_io(&delay_ch->avg_read_io);
	_abort_all_delayed_io(&delay_ch->avg_write_io);
	_abort_all_delayed_io(&delay_ch->p99_read_io);
	_abort_all_delayed_io(&delay_ch->p99_write_io);

	spdk_for_each_channel_continue(i, 0);
}
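
/* Reset flow: spdk_for_each_channel() runs vbdev_delay_reset_channel() on every
 * thread that owns an I/O channel for this node, aborting all delayed I/O, and
 * only then invokes vbdev_delay_reset_dev(), which forwards the reset to the
 * base bdev.
 */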

static bool
abort_delayed_io(void *_head, struct spdk_bdev_io *bio_to_abort)
{
	STAILQ_HEAD(, delay_bdev_io) *head = _head;
	struct delay_bdev_io *io_ctx_to_abort = (struct delay_bdev_io *)bio_to_abort->driver_ctx;
	struct delay_bdev_io *io_ctx;

	STAILQ_FOREACH(io_ctx, head, link) {
		if (io_ctx == io_ctx_to_abort) {
			STAILQ_REMOVE(head, io_ctx_to_abort, delay_bdev_io, link);
			if (io_ctx->zcopy_bdev_io != NULL) {
				spdk_bdev_zcopy_end(io_ctx->zcopy_bdev_io, false, abort_zcopy_io, NULL);
			}
			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
			return true;
		}
	}

	return false;
}

static int
vbdev_delay_abort(struct vbdev_delay *delay_node, struct delay_io_channel *delay_ch,
		  struct spdk_bdev_io *bdev_io)
{
	struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;

	if (abort_delayed_io(&delay_ch->avg_read_io, bio_to_abort) ||
	    abort_delayed_io(&delay_ch->avg_write_io, bio_to_abort) ||
	    abort_delayed_io(&delay_ch->p99_read_io, bio_to_abort) ||
	    abort_delayed_io(&delay_ch->p99_write_io, bio_to_abort)) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
		return 0;
	}

	return spdk_bdev_abort(delay_node->base_desc, delay_ch->base_ch, bio_to_abort,
			       _delay_complete_io, bdev_io);
}

static void
vbdev_delay_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_delay *delay_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_delay, delay_bdev);
	struct delay_io_channel *delay_ch = spdk_io_channel_get_ctx(ch);
	struct delay_bdev_io *io_ctx = (struct delay_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;
	bool is_p99;

	/* Roughly 1 in 100 I/Os is given the p99 latency instead of the average. */
	is_p99 = (rand_r(&delay_ch->rand_seed) % 100) == 0;

	io_ctx->ch = ch;
	io_ctx->type = DELAY_NONE;
	if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY || bdev_io->u.bdev.zcopy.start) {
		io_ctx->zcopy_bdev_io = NULL;
	}

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		io_ctx->type = is_p99 ? DELAY_P99_READ : DELAY_AVG_READ;
		spdk_bdev_io_get_buf(bdev_io, delay_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_ctx->type = is_p99 ? DELAY_P99_WRITE : DELAY_AVG_WRITE;
		rc = spdk_bdev_writev_blocks(delay_node->base_desc, delay_ch->base_ch, bdev_io->u.bdev.iovs,
					     bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					     bdev_io->u.bdev.num_blocks, _delay_complete_io,
					     bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(delay_node->base_desc, delay_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _delay_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(delay_node->base_desc, delay_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _delay_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(delay_node->base_desc, delay_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _delay_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		/* During reset, the generic bdev layer aborts all new I/Os and queues all new resets.
		 * Hence we can simply abort all I/O that is currently being delayed.
		 */
		spdk_for_each_channel(delay_node, vbdev_delay_reset_channel, bdev_io,
				      vbdev_delay_reset_dev);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = vbdev_delay_abort(delay_node, delay_ch, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		if (bdev_io->u.bdev.zcopy.commit) {
			io_ctx->type = is_p99 ? DELAY_P99_WRITE : DELAY_AVG_WRITE;
		} else if (bdev_io->u.bdev.zcopy.populate) {
			io_ctx->type = is_p99 ? DELAY_P99_READ : DELAY_AVG_READ;
		}
		if (bdev_io->u.bdev.zcopy.start) {
			rc = spdk_bdev_zcopy_start(delay_node->base_desc, delay_ch->base_ch,
						   bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   bdev_io->u.bdev.zcopy.populate,
						   _delay_complete_io, bdev_io);
		} else {
			rc = spdk_bdev_zcopy_end(io_ctx->zcopy_bdev_io, bdev_io->u.bdev.zcopy.commit,
						 _delay_complete_io, bdev_io);
		}
		break;
	default:
		SPDK_ERRLOG("delay: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (rc == -ENOMEM) {
		SPDK_ERRLOG("No memory, start to queue io for delay.\n");
		vbdev_delay_queue_io(bdev_io);
	} else if (rc != 0) {
		SPDK_ERRLOG("ERROR on bdev_io submission!\n");
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static bool
vbdev_delay_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_delay *delay_node = (struct vbdev_delay *)ctx;

	return spdk_bdev_io_type_supported(delay_node->base_bdev, io_type);
}

static struct spdk_io_channel *
vbdev_delay_get_io_channel(void *ctx)
{
	struct vbdev_delay *delay_node = (struct vbdev_delay *)ctx;
	struct spdk_io_channel *delay_ch = NULL;

	delay_ch = spdk_get_io_channel(delay_node);

	return delay_ch;
}

static void
_delay_write_conf_values(struct vbdev_delay *delay_node, struct spdk_json_write_ctx *w)
{
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&delay_node->delay_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(delay_node->base_bdev));
	spdk_json_write_named_int64(w, "avg_read_latency",
				    delay_node->average_read_latency_ticks * SPDK_SEC_TO_USEC / spdk_get_ticks_hz());
	spdk_json_write_named_int64(w, "p99_read_latency",
				    delay_node->p99_read_latency_ticks * SPDK_SEC_TO_USEC / spdk_get_ticks_hz());
	spdk_json_write_named_int64(w, "avg_write_latency",
				    delay_node->average_write_latency_ticks * SPDK_SEC_TO_USEC / spdk_get_ticks_hz());
	spdk_json_write_named_int64(w, "p99_write_latency",
				    delay_node->p99_write_latency_ticks * SPDK_SEC_TO_USEC / spdk_get_ticks_hz());
}
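
/* The ticks-to-microseconds conversion here is the inverse of the one done at
 * creation time (ticks = (hz / SPDK_SEC_TO_USEC) * us), so the emitted values
 * round-trip through bdev_delay_create unchanged, modulo integer truncation.
 */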

static int
vbdev_delay_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_delay *delay_node = (struct vbdev_delay *)ctx;

	spdk_json_write_name(w, "delay");
	spdk_json_write_object_begin(w);
	_delay_write_conf_values(delay_node, w);
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
static int
vbdev_delay_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_delay *delay_node;

	TAILQ_FOREACH(delay_node, &g_delay_nodes, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_delay_create");
		spdk_json_write_named_object_begin(w, "params");
		_delay_write_conf_values(delay_node, w);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}
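
/* The emitted JSON per node looks roughly like this (latencies in microseconds;
 * the names and numbers below are illustrative only):
 *
 * {
 *   "method": "bdev_delay_create",
 *   "params": {
 *     "name": "Delay0",
 *     "base_bdev_name": "Malloc0",
 *     "avg_read_latency": 100,
 *     "p99_read_latency": 1000,
 *     "avg_write_latency": 200,
 *     "p99_write_latency": 2000
 *   }
 * }
 */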

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. This is also
 * where we register the per-channel poller that completes delayed I/O.
 */
static int
delay_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct delay_io_channel *delay_ch = ctx_buf;
	struct vbdev_delay *delay_node = io_device;

	STAILQ_INIT(&delay_ch->avg_read_io);
	STAILQ_INIT(&delay_ch->p99_read_io);
	STAILQ_INIT(&delay_ch->avg_write_io);
	STAILQ_INIT(&delay_ch->p99_write_io);

	delay_ch->io_poller = SPDK_POLLER_REGISTER(_delay_finish_io, delay_ch, 0);
	delay_ch->base_ch = spdk_bdev_get_io_channel(delay_node->base_desc);
	delay_ch->rand_seed = time(NULL);

	return 0;
}
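
/* SPDK_POLLER_REGISTER() with a period of 0 runs _delay_finish_io() on every
 * iteration of the owning thread's reactor loop, which keeps the achievable
 * delay resolution close to the reactor's polling granularity.
 */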

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created, i.e. unregister the per-channel poller and put the base
 * bdev's I/O channel.
 */
static void
delay_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct delay_io_channel *delay_ch = ctx_buf;

	spdk_poller_unregister(&delay_ch->io_poller);
	spdk_put_io_channel(delay_ch->base_ch);
}

/* Create the delay association from the bdev and vbdev name and insert
 * on the global list. */
static int
vbdev_delay_insert_association(const char *bdev_name, const char *vbdev_name,
			       uint64_t avg_read_latency, uint64_t p99_read_latency,
			       uint64_t avg_write_latency, uint64_t p99_write_latency)
{
	struct bdev_association *assoc;

	TAILQ_FOREACH(assoc, &g_bdev_associations, link) {
		if (strcmp(vbdev_name, assoc->vbdev_name) == 0) {
			SPDK_ERRLOG("delay bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	assoc = calloc(1, sizeof(struct bdev_association));
	if (!assoc) {
		SPDK_ERRLOG("could not allocate bdev_association\n");
		return -ENOMEM;
	}

	assoc->bdev_name = strdup(bdev_name);
	if (!assoc->bdev_name) {
		SPDK_ERRLOG("could not allocate assoc->bdev_name\n");
		free(assoc);
		return -ENOMEM;
	}

	assoc->vbdev_name = strdup(vbdev_name);
	if (!assoc->vbdev_name) {
		SPDK_ERRLOG("could not allocate assoc->vbdev_name\n");
		free(assoc->bdev_name);
		free(assoc);
		return -ENOMEM;
	}

	assoc->avg_read_latency = avg_read_latency;
	assoc->p99_read_latency = p99_read_latency;
	assoc->avg_write_latency = avg_write_latency;
	assoc->p99_write_latency = p99_write_latency;

	TAILQ_INSERT_TAIL(&g_bdev_associations, assoc, link);

	return 0;
}

int
vbdev_delay_update_latency_value(char *delay_name, uint64_t latency_us, enum delay_io_type type)
{
	struct vbdev_delay *delay_node;
	uint64_t ticks_mhz = spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;

	TAILQ_FOREACH(delay_node, &g_delay_nodes, link) {
		if (strcmp(delay_node->delay_bdev.name, delay_name) == 0) {
			break;
		}
	}

	if (delay_node == NULL) {
		return -ENODEV;
	}

	switch (type) {
	case DELAY_AVG_READ:
		delay_node->average_read_latency_ticks = ticks_mhz * latency_us;
		break;
	case DELAY_AVG_WRITE:
		delay_node->average_write_latency_ticks = ticks_mhz * latency_us;
		break;
	case DELAY_P99_READ:
		delay_node->p99_read_latency_ticks = ticks_mhz * latency_us;
		break;
	case DELAY_P99_WRITE:
		delay_node->p99_write_latency_ticks = ticks_mhz * latency_us;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
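
/* ticks_mhz is the tick rate expressed in ticks per microsecond. As a sketch of
 * the arithmetic (illustrative numbers): with spdk_get_ticks_hz() == 3,000,000,000
 * and SPDK_SEC_TO_USEC == 1,000,000, ticks_mhz == 3000, so latency_us == 50 is
 * stored as 150,000 ticks. An update takes effect for I/O that the base bdev
 * completes after the change; see the FIFO note in _process_io_stailq() for how
 * already-delayed I/O behaves.
 */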

static int
vbdev_delay_init(void)
{
	/* Not allowing for .ini style configuration. */
	return 0;
}

static void
vbdev_delay_finish(void)
{
	struct bdev_association *assoc;

	while ((assoc = TAILQ_FIRST(&g_bdev_associations))) {
		TAILQ_REMOVE(&g_bdev_associations, assoc, link);
		free(assoc->bdev_name);
		free(assoc->vbdev_name);
		free(assoc);
	}
}

static int
vbdev_delay_get_ctx_size(void)
{
	return sizeof(struct delay_bdev_io);
}

static void
vbdev_delay_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

static int
vbdev_delay_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct vbdev_delay *delay_node = (struct vbdev_delay *)ctx;

	/* Delay bdev doesn't touch the data buffers itself, so it supports any memory domain used by base_bdev */
	return spdk_bdev_get_memory_domains(delay_node->base_bdev, domains, array_size);
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_delay_fn_table = {
	.destruct		= vbdev_delay_destruct,
	.submit_request		= vbdev_delay_submit_request,
	.io_type_supported	= vbdev_delay_io_type_supported,
	.get_io_channel		= vbdev_delay_get_io_channel,
	.dump_info_json		= vbdev_delay_dump_info_json,
	.write_config_json	= vbdev_delay_write_config_json,
	.get_memory_domains	= vbdev_delay_get_memory_domains,
};
static void
vbdev_delay_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_delay *delay_node, *tmp;

	TAILQ_FOREACH_SAFE(delay_node, &g_delay_nodes, link, tmp) {
		if (bdev_find == delay_node->base_bdev) {
			spdk_bdev_unregister(&delay_node->delay_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event, such as bdev removal. */
static void
vbdev_delay_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
			       void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_delay_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Create and register the delay vbdev if we find it in our list of bdev names.
 * This can be called either by the examine path or RPC method.
 */
static int
vbdev_delay_register(const char *bdev_name)
{
	struct bdev_association *assoc;
	struct vbdev_delay *delay_node;
	struct spdk_bdev *bdev;
	uint64_t ticks_mhz = spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
	int rc = 0;

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the delay_node & bdev accordingly.
	 */
	TAILQ_FOREACH(assoc, &g_bdev_associations, link) {
		if (strcmp(assoc->bdev_name, bdev_name) != 0) {
			continue;
		}

		delay_node = calloc(1, sizeof(struct vbdev_delay));
		if (!delay_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate delay_node\n");
			break;
		}
		delay_node->delay_bdev.name = strdup(assoc->vbdev_name);
		if (!delay_node->delay_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate delay_bdev name\n");
			free(delay_node);
			break;
		}
		delay_node->delay_bdev.product_name = "delay";

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_delay_base_bdev_event_cb,
					NULL, &delay_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(delay_node->delay_bdev.name);
			free(delay_node);
			break;
		}

		bdev = spdk_bdev_desc_get_bdev(delay_node->base_desc);
		delay_node->base_bdev = bdev;

		delay_node->delay_bdev.write_cache = bdev->write_cache;
		delay_node->delay_bdev.required_alignment = bdev->required_alignment;
		delay_node->delay_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		delay_node->delay_bdev.blocklen = bdev->blocklen;
		delay_node->delay_bdev.blockcnt = bdev->blockcnt;

		delay_node->delay_bdev.ctxt = delay_node;
		delay_node->delay_bdev.fn_table = &vbdev_delay_fn_table;
		delay_node->delay_bdev.module = &delay_if;

		/* Store the number of ticks you need to add to get the I/O expiration time. */
		delay_node->average_read_latency_ticks = ticks_mhz * assoc->avg_read_latency;
		delay_node->p99_read_latency_ticks = ticks_mhz * assoc->p99_read_latency;
		delay_node->average_write_latency_ticks = ticks_mhz * assoc->avg_write_latency;
		delay_node->p99_write_latency_ticks = ticks_mhz * assoc->p99_write_latency;

		spdk_io_device_register(delay_node, delay_bdev_ch_create_cb, delay_bdev_ch_destroy_cb,
					sizeof(struct delay_io_channel),
					assoc->vbdev_name);

		/* Save the thread where the base device is opened */
		delay_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, delay_node->base_desc, delay_node->delay_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", bdev_name);
			goto error_close;
		}

		rc = spdk_bdev_register(&delay_node->delay_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register delay_bdev\n");
			spdk_bdev_module_release_bdev(delay_node->base_bdev);
			goto error_close;
		}

		TAILQ_INSERT_TAIL(&g_delay_nodes, delay_node, link);
	}

	return rc;

error_close:
	spdk_bdev_close(delay_node->base_desc);
	spdk_io_device_unregister(delay_node, NULL);
	free(delay_node->delay_bdev.name);
	free(delay_node);
	return rc;
}

int
create_delay_disk(const char *bdev_name, const char *vbdev_name, uint64_t avg_read_latency,
		  uint64_t p99_read_latency, uint64_t avg_write_latency, uint64_t p99_write_latency)
{
	int rc = 0;

	if (p99_read_latency < avg_read_latency || p99_write_latency < avg_write_latency) {
		SPDK_ERRLOG("Unable to create a delay bdev where p99 latency is less than average latency.\n");
		return -EINVAL;
	}

	rc = vbdev_delay_insert_association(bdev_name, vbdev_name, avg_read_latency, p99_read_latency,
					    avg_write_latency, p99_write_latency);
	if (rc) {
		return rc;
	}

	rc = vbdev_delay_register(bdev_name);
	if (rc == -ENODEV) {
		/* This is not an error, we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}
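
/* A minimal usage sketch from a caller such as an RPC handler (hypothetical
 * bdev names; latencies are in microseconds, per the ticks_mhz conversion above):
 *
 *   rc = create_delay_disk("Malloc0", "Delay0", 100, 1000, 200, 2000);
 *
 * This would layer a delay vbdev "Delay0" on top of "Malloc0" with 100us/1ms
 * average/p99 read delays and 200us/2ms average/p99 write delays, or defer
 * creation until "Malloc0" appears.
 */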

void
delete_delay_disk(const char *vbdev_name, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_association *assoc;
	int rc;

	rc = spdk_bdev_unregister_by_name(vbdev_name, &delay_if, cb_fn, cb_arg);
	if (rc == 0) {
		TAILQ_FOREACH(assoc, &g_bdev_associations, link) {
			if (strcmp(assoc->vbdev_name, vbdev_name) == 0) {
				TAILQ_REMOVE(&g_bdev_associations, assoc, link);
				free(assoc->bdev_name);
				free(assoc->vbdev_name);
				free(assoc);
				break;
			}
		}
	} else {
		cb_fn(cb_arg, rc);
	}
}

static void
vbdev_delay_examine(struct spdk_bdev *bdev)
{
	vbdev_delay_register(bdev->name);

	spdk_bdev_module_examine_done(&delay_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_delay)