/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

/*
 * virtio-blk over vfio-user transport
 */
#include <linux/virtio_blk.h>

#include "spdk/env.h"
#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/stdinc.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/pci_ids.h"

#include "vfu_virtio_internal.h"

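/*
 * Feature bits unconditionally advertised to the guest. The optional
 * features (VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES and
 * VIRTIO_BLK_F_FLUSH) are added at negotiation time based on the backing
 * bdev's capabilities, see virtio_blk_get_supported_features().
 */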
#define VIRTIO_BLK_SUPPORTED_FEATURES ((1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
				       (1ULL << VIRTIO_BLK_F_SEG_MAX) | \
				       (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
				       (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
				       (1ULL << VIRTIO_BLK_F_MQ))

struct virtio_blk_endpoint {
	struct vfu_virtio_endpoint virtio;

	/* virtio_blk specific configurations */
	struct spdk_thread *init_thread;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *bdev_desc;
	struct spdk_io_channel *io_channel;
	struct virtio_blk_config blk_cfg;

	/* virtio_blk ring process poller */
	struct spdk_poller *ring_poller;
};

struct virtio_blk_req {
	volatile uint8_t *status;
	struct virtio_blk_endpoint *endpoint;
	/* Keep req as the last member: the per-request scatter-gather
	 * storage is allocated directly after it, see virtio_blk_alloc_req().
	 */
	struct vfu_virtio_req req;
};

static inline struct virtio_blk_endpoint *
to_blk_endpoint(struct vfu_virtio_endpoint *virtio_endpoint)
{
	return SPDK_CONTAINEROF(virtio_endpoint, struct virtio_blk_endpoint, virtio);
}

static inline struct virtio_blk_req *
to_blk_request(struct vfu_virtio_req *request)
{
	return SPDK_CONTAINEROF(request, struct virtio_blk_req, req);
}

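/*
 * Ring poller: runs on the endpoint's SPDK thread and services every
 * enabled, active virtqueue of the device. Completed descriptors are
 * signalled to the guest via vfu_virtio_vq_flush_irq() before new ones
 * are harvested from the split or packed ring.
 */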
static int
vfu_virtio_blk_vring_poll(void *ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = ctx;
	struct vfu_virtio_dev *dev = blk_endpoint->virtio.dev;
	struct vfu_virtio_vq *vq;
	uint32_t i, count = 0;

	if (spdk_unlikely(!virtio_dev_is_started(dev))) {
		return SPDK_POLLER_IDLE;
	}

	if (spdk_unlikely(blk_endpoint->virtio.quiesce_in_progress)) {
		return SPDK_POLLER_IDLE;
	}

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
			continue;
		}

		vfu_virtio_vq_flush_irq(dev, vq);

		if (vq->packed.packed_ring) {
			/* packed vring */
			count += vfu_virtio_dev_process_packed_ring(dev, vq);
		} else {
			/* split vring */
			count += vfu_virtio_dev_process_split_ring(dev, vq);
		}
	}

	return count ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
virtio_blk_start(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->ring_poller) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_blk, "starting %s\n", virtio_endpoint->dev->name);
	blk_endpoint->io_channel = spdk_bdev_get_io_channel(blk_endpoint->bdev_desc);
	blk_endpoint->ring_poller = SPDK_POLLER_REGISTER(vfu_virtio_blk_vring_poll, blk_endpoint, 0);

	return 0;
}

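/*
 * The I/O channel and ring poller belong to the endpoint's SPDK thread,
 * so teardown is marshalled there with spdk_thread_send_msg() rather than
 * performed directly in virtio_blk_stop().
 */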
static void
_virtio_blk_stop_msg(void *ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = ctx;

	spdk_poller_unregister(&blk_endpoint->ring_poller);
	spdk_put_io_channel(blk_endpoint->io_channel);
	blk_endpoint->io_channel = NULL;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s is stopped\n",
		      spdk_vfu_get_endpoint_id(blk_endpoint->virtio.endpoint));
}

static int
virtio_blk_stop(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (!blk_endpoint->io_channel) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s stopping\n", spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
	spdk_thread_send_msg(virtio_endpoint->thread, _virtio_blk_stop_msg, blk_endpoint);
	return 0;
}

static void
virtio_blk_req_finish(struct virtio_blk_req *blk_req, uint8_t status)
{
	struct vfu_virtio_req *req = &blk_req->req;

	if (spdk_likely(blk_req->status)) {
		*blk_req->status = status;
		blk_req->status = NULL;
	}

	vfu_virtio_finish_req(req);
}

static void
blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct virtio_blk_req *blk_req = cb_arg;

	SPDK_DEBUGLOG(vfu_virtio_blk, "IO done status %u\n", success);

	spdk_bdev_free_io(bdev_io);
	virtio_blk_req_finish(blk_req, success ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
}

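/*
 * Execute one virtio-blk request. Per the virtio spec the descriptor chain
 * is laid out as:
 *   iovs[0]             : struct virtio_blk_outhdr (type, reserved, sector)
 *   iovs[1..iovcnt - 2] : optional payload buffers
 *   iovs[iovcnt - 1]    : 1-byte status written back to the guest
 */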
static int
virtio_blk_process_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		       struct vfu_virtio_req *req)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	struct virtio_blk_req *blk_req = to_blk_request(req);
	const struct virtio_blk_outhdr *hdr;
	struct virtio_blk_discard_write_zeroes *desc;
	struct iovec *iov;
	uint16_t iovcnt;
	uint64_t flush_bytes;
	uint32_t type;
	uint32_t payload_len;
	int ret;

	blk_req->endpoint = blk_endpoint;

	iov = &req->iovs[0];
	if (spdk_unlikely(iov->iov_len != sizeof(*hdr))) {
		SPDK_ERRLOG("Invalid virtio_blk header length %zu\n", iov->iov_len);
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -EINVAL;
	}
	hdr = iov->iov_base;

	iov = &req->iovs[req->iovcnt - 1];
	if (spdk_unlikely(iov->iov_len != 1)) {
		SPDK_ERRLOG("Invalid virtio_blk response length %zu\n", iov->iov_len);
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -EINVAL;
	}
	blk_req->status = iov->iov_base;

	payload_len = req->payload_size;
	payload_len -= sizeof(*hdr) + 1;
	iovcnt = req->iovcnt - 2;

	type = hdr->type;
	/* The legacy barrier type isn't supported, mask it off */
	type &= ~VIRTIO_BLK_T_BARRIER;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s: type %u, iovcnt %u, payload_len %u\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      type, iovcnt, payload_len);

	if (spdk_unlikely(blk_endpoint->bdev_desc == NULL)) {
		SPDK_ERRLOG("Bdev has been removed\n");
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
		return 0;
	}

	switch (type) {
	case VIRTIO_BLK_T_IN:
	case VIRTIO_BLK_T_OUT:
		if (spdk_unlikely(payload_len == 0 || (payload_len & (512 - 1)) != 0)) {
			SPDK_ERRLOG("Invalid payload length %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}
		if (type == VIRTIO_BLK_T_IN) {
			req->used_len = payload_len + 1;
			ret = spdk_bdev_readv(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					      &req->iovs[1], iovcnt, hdr->sector * 512,
					      payload_len, blk_request_complete_cb, blk_req);
		} else {
			req->used_len = 1;
			ret = spdk_bdev_writev(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					       &req->iovs[1], iovcnt, hdr->sector * 512,
					       payload_len, blk_request_complete_cb, blk_req);
		}
		if (ret) {
			SPDK_ERRLOG("R/W error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_DISCARD:
		desc = req->iovs[1].iov_base;
		if (payload_len != sizeof(*desc)) {
			SPDK_NOTICELOG("Invalid discard payload size: %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -EINVAL;
		}

		if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
			SPDK_ERRLOG("UNMAP flag is only valid for the WRITE ZEROES command\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}

		ret = spdk_bdev_unmap(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
				      desc->sector * 512, desc->num_sectors * 512,
				      blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("UNMAP error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_WRITE_ZEROES:
		desc = req->iovs[1].iov_base;
		if (payload_len != sizeof(*desc)) {
			SPDK_NOTICELOG("Invalid write zeroes payload size: %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -EINVAL;
		}

		/* SPDK doesn't support unmapping as part of WRITE ZEROES, and the
		 * Linux guest driver may set the unmap flag without checking whether
		 * the feature was negotiated. The flag isn't mandatory, so just log
		 * a warning and ignore it.
		 */
		if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
			SPDK_WARNLOG("Ignore the unmap flag for WRITE ZEROES from %"PRIx64", len %"PRIx64"\n",
				     (uint64_t)desc->sector * 512, (uint64_t)desc->num_sectors * 512);
		}

		ret = spdk_bdev_write_zeroes(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					     desc->sector * 512, desc->num_sectors * 512,
					     blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("WRITE ZEROES error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_FLUSH:
		flush_bytes = spdk_bdev_get_num_blocks(blk_endpoint->bdev) *
			      spdk_bdev_get_block_size(blk_endpoint->bdev);
		if (hdr->sector != 0) {
			SPDK_NOTICELOG("sector must be zero for flush command\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -EINVAL;
		}
		ret = spdk_bdev_flush(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
				      0, flush_bytes,
				      blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("FLUSH error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_GET_ID:
		if (!iovcnt || !payload_len) {
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}
		req->used_len = spdk_min((size_t)VIRTIO_BLK_ID_BYTES, req->iovs[1].iov_len);
		spdk_strcpy_pad(req->iovs[1].iov_base, spdk_bdev_get_name(blk_endpoint->bdev),
				req->used_len, ' ');
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_OK);
		break;
	default:
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -ENOTSUP;
	}

	return 0;
}

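/*
 * Refresh the device-specific config space from the backing bdev. Per the
 * virtio-blk spec, capacity and the discard/write-zeroes limits are all
 * expressed in 512-byte sectors regardless of the bdev's block size.
 */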
static void
virtio_blk_update_config(struct virtio_blk_config *blk_cfg, struct spdk_bdev *bdev,
			 uint16_t num_queues)
{
	memset(blk_cfg, 0, sizeof(*blk_cfg));

	if (!bdev) {
		return;
	}

	blk_cfg->blk_size = spdk_bdev_get_block_size(bdev);
	blk_cfg->capacity = (blk_cfg->blk_size * spdk_bdev_get_num_blocks(bdev)) / 512;
	/* minimum I/O size in blocks */
	blk_cfg->min_io_size = 1;
	blk_cfg->num_queues = num_queues;

	if (spdk_bdev_get_buf_align(bdev) > 1) {
		blk_cfg->size_max = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
		blk_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 2 - 1);
	} else {
		blk_cfg->size_max = 131072;
		/* -2 for REQ and RESP and -1 for region boundary splitting */
		blk_cfg->seg_max = VIRTIO_DEV_MAX_IOVS - 2 - 1;
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
		/* 16MiB, expressed in 512-byte sectors */
		blk_cfg->max_discard_sectors = 32768;
		blk_cfg->max_discard_seg = 1;
		blk_cfg->discard_sector_alignment = blk_cfg->blk_size / 512;
	}
	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
		blk_cfg->max_write_zeroes_sectors = 32768;
		blk_cfg->max_write_zeroes_seg = 1;
	}
}

static void
_vfu_virtio_blk_bdev_close(void *arg1)
{
	struct spdk_bdev_desc *bdev_desc = arg1;

	spdk_bdev_close(bdev_desc);
}

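/*
 * Bdev event handler. On hot remove the config space is zeroed, the I/O
 * path is shut down on the endpoint's thread, and the descriptor is closed
 * on the thread that opened it (init_thread). On resize the refreshed
 * config is pushed to the guest via a config-change notification.
 */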
static void
bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
	      void *event_ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = event_ctx;

	SPDK_DEBUGLOG(vfu_virtio_blk, "Bdev event: type %d, name %s\n", type, bdev->name);

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_REMOVE)\n", bdev->name);
		virtio_blk_update_config(&blk_endpoint->blk_cfg, NULL, 0);

		if (blk_endpoint->io_channel) {
			spdk_thread_send_msg(blk_endpoint->virtio.thread, _virtio_blk_stop_msg, blk_endpoint);
		}

		if (blk_endpoint->bdev_desc) {
			spdk_thread_send_msg(blk_endpoint->init_thread, _vfu_virtio_blk_bdev_close,
					     blk_endpoint->bdev_desc);
			blk_endpoint->bdev_desc = NULL;
		}
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_RESIZE)\n", bdev->name);
		virtio_blk_update_config(&blk_endpoint->blk_cfg, blk_endpoint->bdev,
					 blk_endpoint->virtio.num_queues);
		vfu_virtio_notify_config(&blk_endpoint->virtio);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

static uint64_t
virtio_blk_get_supported_features(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	uint64_t features;
	struct spdk_bdev *bdev;

	features = VIRTIO_BLK_SUPPORTED_FEATURES | VIRTIO_HOST_SUPPORTED_FEATURES;

	if (!virtio_endpoint->packed_ring) {
		features &= ~(1ULL << VIRTIO_F_RING_PACKED);
	}
	bdev = blk_endpoint->bdev;

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
		features |= (1ULL << VIRTIO_BLK_F_DISCARD);
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
		features |= (1ULL << VIRTIO_BLK_F_WRITE_ZEROES);
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		features |= (1ULL << VIRTIO_BLK_F_FLUSH);
	}

	return features;
}

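/*
 * Serve guest reads of the device-specific config region. Partial reads at
 * any offset within struct virtio_blk_config are allowed; offsets beyond
 * the structure are rejected.
 */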
static int
virtio_blk_get_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				      uint64_t offset, uint64_t count)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	uint8_t *blk_cfg;
	uint64_t len;

	if (offset >= sizeof(struct virtio_blk_config)) {
		return -EINVAL;
	}
	len = spdk_min(sizeof(struct virtio_blk_config) - offset, count);

	blk_cfg = (uint8_t *)&blk_endpoint->blk_cfg;
	memcpy(buf, blk_cfg + offset, len);

	return 0;
}

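/*
 * Allocate one request. The vfu_virtio_req member must stay the last field
 * of struct virtio_blk_req: the transport's scatter-gather storage for up
 * to VIRTIO_DEV_MAX_IOVS + 1 DMA segments is carved out of the same
 * allocation, immediately after the structure.
 */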
static struct vfu_virtio_req *
virtio_blk_alloc_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct virtio_blk_req *blk_req;

	blk_req = calloc(1, sizeof(*blk_req) + dma_sg_size() * (VIRTIO_DEV_MAX_IOVS + 1));
	if (!blk_req) {
		return NULL;
	}

	return &blk_req->req;
}

static void
virtio_blk_free_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		    struct vfu_virtio_req *req)
{
	struct virtio_blk_req *blk_req = to_blk_request(req);

	free(blk_req);
}

struct vfu_virtio_ops virtio_blk_ops = {
	.get_device_features = virtio_blk_get_supported_features,
	.alloc_req = virtio_blk_alloc_req,
	.free_req = virtio_blk_free_req,
	.exec_request = virtio_blk_process_req,
	.get_config = virtio_blk_get_device_specific_config,
	.start_device = virtio_blk_start,
	.stop_device = virtio_blk_stop,
};

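/*
 * Attach a bdev to an existing virtio-blk endpoint (typically reached via
 * SPDK's vfu_virtio_create_blk_endpoint RPC). num_queues and qsize values
 * of 0 or out of range leave the endpoint's defaults untouched. The bdev
 * descriptor is opened on the calling thread, which is remembered as
 * init_thread so the matching spdk_bdev_close() can be issued from the
 * same thread later.
 */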
int
vfu_virtio_blk_add_bdev(const char *name, const char *bdev_name,
			uint16_t num_queues, uint16_t qsize, bool packed_ring)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_blk_endpoint *blk_endpoint;
	int ret;

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->bdev_desc) {
		SPDK_ERRLOG("%s: block device already exists\n", spdk_vfu_get_endpoint_id(endpoint));
		return -EEXIST;
	}

	if (num_queues && (num_queues <= VIRTIO_DEV_MAX_VQS)) {
		blk_endpoint->virtio.num_queues = num_queues;
	}
	if (qsize && (qsize <= VIRTIO_VQ_MAX_SIZE)) {
		blk_endpoint->virtio.qsize = qsize;
	}
	blk_endpoint->virtio.packed_ring = packed_ring;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s: add block device %s, num_queues %u, qsize %u, packed ring %s\n",
		      spdk_vfu_get_endpoint_id(endpoint),
		      bdev_name, blk_endpoint->virtio.num_queues, blk_endpoint->virtio.qsize,
		      packed_ring ? "enabled" : "disabled");

	ret = spdk_bdev_open_ext(bdev_name, true, bdev_event_cb, blk_endpoint,
				 &blk_endpoint->bdev_desc);
	if (ret != 0) {
		SPDK_ERRLOG("%s could not open bdev '%s', error=%d\n",
			    name, bdev_name, ret);
		return ret;
	}
	blk_endpoint->bdev = spdk_bdev_desc_get_bdev(blk_endpoint->bdev_desc);
	virtio_blk_update_config(&blk_endpoint->blk_cfg, blk_endpoint->bdev,
				 blk_endpoint->virtio.num_queues);
	blk_endpoint->init_thread = spdk_get_thread();

	return 0;
}

static int
vfu_virtio_blk_endpoint_destruct(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->bdev_desc) {
		spdk_thread_send_msg(blk_endpoint->init_thread, _vfu_virtio_blk_bdev_close,
				     blk_endpoint->bdev_desc);
		blk_endpoint->bdev_desc = NULL;
	}

	vfu_virtio_endpoint_destruct(&blk_endpoint->virtio);
	free(blk_endpoint);

	return 0;
}

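/*
 * Endpoint constructor invoked by the generic vfu framework. It returns
 * the embedded vfu_virtio_endpoint as the endpoint's private data, which
 * is why to_blk_endpoint() can later recover the containing structure with
 * SPDK_CONTAINEROF().
 */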
static void *
vfu_virtio_blk_endpoint_init(struct spdk_vfu_endpoint *endpoint,
			     char *basename, const char *endpoint_name)
{
	struct virtio_blk_endpoint *blk_endpoint;
	int ret;

	blk_endpoint = calloc(1, sizeof(*blk_endpoint));
	if (!blk_endpoint) {
		return NULL;
	}

	ret = vfu_virtio_endpoint_setup(&blk_endpoint->virtio, endpoint, basename, endpoint_name,
					&virtio_blk_ops);
	if (ret) {
		SPDK_ERRLOG("Failed to set up endpoint %s\n", endpoint_name);
		free(blk_endpoint);
		return NULL;
	}

	return (void *)&blk_endpoint->virtio;
}

static int
vfu_virtio_blk_get_device_info(struct spdk_vfu_endpoint *endpoint,
			       struct spdk_vfu_pci_device *device_info)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	vfu_virtio_get_device_info(&blk_endpoint->virtio, device_info);
	/* Fill in the modern virtio-blk PCI Device ID */
	device_info->id.did = PCI_DEVICE_ID_VIRTIO_BLK_MODERN;

	return 0;
}

struct spdk_vfu_endpoint_ops vfu_virtio_blk_ops = {
	.name = "virtio_blk",
	.init = vfu_virtio_blk_endpoint_init,
	.get_device_info = vfu_virtio_blk_get_device_info,
	.get_vendor_capability = vfu_virtio_get_vendor_capability,
	.post_memory_add = vfu_virtio_post_memory_add,
	.pre_memory_remove = vfu_virtio_pre_memory_remove,
	.reset_device = vfu_virtio_pci_reset_cb,
	.quiesce_device = vfu_virtio_quiesce_cb,
	.destruct = vfu_virtio_blk_endpoint_destruct,
	.attach_device = vfu_virtio_attach_device,
	.detach_device = vfu_virtio_detach_device,
};

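/*
 * Register the "virtio_blk" PCI model with the vfu framework at library
 * load time, so endpoints created with this model name resolve to the ops
 * table above.
 */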
static void
__attribute__((constructor)) _vfu_virtio_blk_pci_model_register(void)
{
	spdk_vfu_register_endpoint_ops(&vfu_virtio_blk_ops);
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_blk)