/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

/*
 * virtio over vfio-user common library
 */
#include "spdk/env.h"
#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/stdinc.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"

#include "vfu_virtio_internal.h"

static int vfu_virtio_dev_start(struct vfu_virtio_dev *dev);
static int vfu_virtio_dev_stop(struct vfu_virtio_dev *dev);

static inline void
vfu_virtio_unmap_q(struct vfu_virtio_dev *dev, struct q_mapping *mapping)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;

	if (mapping->addr != NULL) {
		spdk_vfu_unmap_sg(virtio_endpoint->endpoint, mapping->sg,
				  &mapping->iov, 1);
		mapping->addr = NULL;
		mapping->len = 0;
	}
}

static inline int
vfu_virtio_map_q(struct vfu_virtio_dev *dev, struct q_mapping *mapping, uint64_t phys_addr,
		 uint64_t len)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	void *addr;

	if (!mapping->addr && len && phys_addr) {
		addr = spdk_vfu_map_one(virtio_endpoint->endpoint, phys_addr, len,
					mapping->sg, &mapping->iov, PROT_READ | PROT_WRITE);
		if (addr == NULL) {
			return -EINVAL;
		}
		mapping->phys_addr = phys_addr;
		mapping->len = len;
		mapping->addr = addr;
	}

	return 0;
}

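/*
 * The guest programs each ring's 64-bit guest-physical address through two
 * 32-bit registers (Q_DESCLO/Q_DESCHI and friends, handled in
 * virtio_vfu_pci_common_cfg() below). Recombining them is plain bit
 * arithmetic; for example desc_hi = 0x1 and desc_lo = 0x2000 yield
 * phys_addr = (0x1ULL << 32) | 0x2000 = 0x100002000.
 */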
static int
virtio_dev_map_vq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	int ret;
	uint64_t phys_addr, len;

	if (!vq->enabled || (vq->q_state == VFU_VQ_ACTIVE)) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio, "%s: try to map vq %u\n", dev->name, vq->id);

	len = virtio_queue_desc_size(dev, vq);
	phys_addr = ((((uint64_t)vq->desc_hi) << 32) | vq->desc_lo);
	ret = vfu_virtio_map_q(dev, &vq->desc, phys_addr, len);
	if (ret) {
		SPDK_DEBUGLOG(vfu_virtio, "Failed to map descriptor ring\n");
		return ret;
	}

	len = virtio_queue_avail_size(dev, vq);
	phys_addr = ((((uint64_t)vq->avail_hi) << 32) | vq->avail_lo);
	ret = vfu_virtio_map_q(dev, &vq->avail, phys_addr, len);
	if (ret) {
		vfu_virtio_unmap_q(dev, &vq->desc);
		SPDK_DEBUGLOG(vfu_virtio, "Failed to map available ring\n");
		return ret;
	}

	len = virtio_queue_used_size(dev, vq);
	phys_addr = ((((uint64_t)vq->used_hi) << 32) | vq->used_lo);
	ret = vfu_virtio_map_q(dev, &vq->used, phys_addr, len);
	if (ret) {
		vfu_virtio_unmap_q(dev, &vq->desc);
		vfu_virtio_unmap_q(dev, &vq->avail);
		SPDK_DEBUGLOG(vfu_virtio, "Failed to map used ring\n");
		return ret;
	}

	/* We run in polling mode, so tell the driver not to send notifications */
	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		vq->used.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
	} else {
		vq->used.used->flags = VRING_USED_F_NO_NOTIFY;
	}

	SPDK_DEBUGLOG(vfu_virtio, "%s: map vq %u successfully\n", dev->name, vq->id);
	vq->q_state = VFU_VQ_ACTIVE;

	return 0;
}

static void
virtio_dev_unmap_vq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	SPDK_DEBUGLOG(vfu_virtio, "%s: unmap vq %u\n", dev->name, vq->id);
	vq->q_state = VFU_VQ_INACTIVE;

	vfu_virtio_unmap_q(dev, &vq->desc);
	vfu_virtio_unmap_q(dev, &vq->avail);
	vfu_virtio_unmap_q(dev, &vq->used);
}

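/*
 * When the guest removes a DMA memory region (see
 * vfu_virtio_pre_memory_remove() below), any virtqueue whose rings were
 * mapped inside [map_start, map_end) must be unmapped before the region
 * goes away. A NULL range means "unmap unconditionally", which is what
 * device stop uses.
 */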
static bool
vfu_virtio_vq_should_unmap(struct vfu_virtio_vq *vq, void *map_start, void *map_end)
{
	/* always do unmap when stopping the device */
	if (!map_start || !map_end) {
		return true;
	}

	if (vq->desc.addr >= map_start && vq->desc.addr < map_end) {
		return true;
	}

	if (vq->avail.addr >= map_start && vq->avail.addr < map_end) {
		return true;
	}

	if (vq->used.addr >= map_start && vq->used.addr < map_end) {
		return true;
	}

	return false;
}

static void
vfu_virtio_dev_unmap_vqs(struct vfu_virtio_dev *dev, void *map_start, void *map_end)
{
	uint32_t i;
	struct vfu_virtio_vq *vq;

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		if (!vq->enabled) {
			continue;
		}

		if (!vfu_virtio_vq_should_unmap(vq, map_start, map_end)) {
			continue;
		}
		virtio_dev_unmap_vq(dev, vq);
	}
}

/* Notify the VM that the device configuration space has changed. */
void
vfu_virtio_notify_config(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct spdk_vfu_endpoint *endpoint = virtio_endpoint->endpoint;

	if (virtio_endpoint->dev == NULL) {
		return;
	}

	virtio_endpoint->dev->cfg.isr = 1;
	virtio_endpoint->dev->cfg.config_generation++;

	vfu_irq_trigger(spdk_vfu_get_vfu_ctx(endpoint), virtio_endpoint->dev->cfg.msix_config);
}

static void
vfu_virtio_dev_reset(struct vfu_virtio_dev *dev)
{
	uint32_t i;
	struct vfu_virtio_vq *vq;

	SPDK_DEBUGLOG(vfu_virtio, "device %s resetting\n", dev->name);

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];

		vq->q_state = VFU_VQ_CREATED;
		vq->vector = 0;
		vq->enabled = false;
		vq->last_avail_idx = 0;
		vq->last_used_idx = 0;

		vq->packed.packed_ring = false;
		vq->packed.avail_phase = 0;
		vq->packed.used_phase = 0;
	}

	memset(&dev->cfg, 0, sizeof(struct virtio_pci_cfg));
}

static int
virtio_dev_set_status(struct vfu_virtio_dev *dev, uint8_t status)
{
	int ret = 0;

	SPDK_DEBUGLOG(vfu_virtio, "device current status %x, set status %x\n", dev->cfg.device_status,
		      status);

	if (!(virtio_dev_is_started(dev))) {
		if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
			ret = vfu_virtio_dev_start(dev);
		}
	} else {
		if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
			ret = vfu_virtio_dev_stop(dev);
		}
	}

	if (ret) {
		SPDK_ERRLOG("Failed to start/stop device\n");
		return ret;
	}

	dev->cfg.device_status = status;

	if (status == 0) {
		vfu_virtio_dev_reset(dev);
	}

	return 0;
}

static int
virtio_dev_set_features(struct vfu_virtio_dev *dev, uint64_t features)
{
	if (dev->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		SPDK_ERRLOG("Feature negotiation has finished\n");
		return -EINVAL;
	}

	if (features & ~dev->host_features) {
		SPDK_ERRLOG("Host features 0x%"PRIx64", guest features 0x%"PRIx64"\n",
			    dev->host_features, features);
		return -ENOTSUP;
	}

	SPDK_DEBUGLOG(vfu_virtio, "%s: negotiated features 0x%"PRIx64"\n", dev->name,
		      features);
	dev->cfg.guest_features = features;

	return 0;
}

static int
virtio_dev_enable_vq(struct vfu_virtio_dev *dev, uint16_t qid)
{
	struct vfu_virtio_vq *vq;

	SPDK_DEBUGLOG(vfu_virtio, "%s: enable vq %u\n", dev->name, qid);

	vq = &dev->vqs[qid];
	if (vq->enabled) {
		SPDK_ERRLOG("Queue %u is already enabled\n", qid);
		return -EINVAL;
	}
	vq->enabled = true;

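	/*
	 * A map failure here is not fatal: the guest may not have registered
	 * its memory with the vfio-user server yet. The queue stays enabled
	 * and vfu_virtio_post_memory_add() retries the mapping once the
	 * memory region arrives, hence the return 0 below.
	 */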
	if (virtio_dev_map_vq(dev, vq)) {
		SPDK_ERRLOG("Queue %u failed to map\n", qid);
		return 0;
	}

	vq->avail.avail->idx = 0;
	vq->last_avail_idx = 0;
	vq->used.used->idx = 0;
	vq->last_used_idx = 0;

	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		SPDK_DEBUGLOG(vfu_virtio, "%s: vq %u PACKED RING ENABLED\n", dev->name, qid);
		vq->packed.packed_ring = true;
		vq->packed.avail_phase = true;
		vq->packed.used_phase = true;
	}

	return 0;
}

static int
virtio_dev_disable_vq(struct vfu_virtio_dev *dev, uint16_t qid)
{
	struct vfu_virtio_vq *vq;

	SPDK_DEBUGLOG(vfu_virtio, "%s: disable vq %u\n", dev->name, qid);

	vq = &dev->vqs[qid];
	if (!vq->enabled) {
		SPDK_NOTICELOG("Queue %u isn't enabled\n", qid);
		return 0;
	}

	virtio_dev_unmap_vq(dev, vq);

	vq->q_state = VFU_VQ_CREATED;
	vq->vector = 0;
	vq->enabled = false;
	vq->last_avail_idx = 0;
	vq->last_used_idx = 0;
	vq->packed.packed_ring = false;
	vq->packed.avail_phase = 0;
	vq->packed.used_phase = 0;

	return 0;
}

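/*
 * Split-ring available index bookkeeping relies on free-running 16-bit
 * counters: both avail->idx and last_avail_idx increment without masking,
 * so "avail_idx - last_idx" yields the number of new entries even across a
 * wrap (e.g. last_idx = 65534, avail_idx = 2 gives count = 4 in uint16_t
 * arithmetic). Only when indexing the ring itself is the value masked with
 * (qsize - 1), which assumes qsize is a power of two as the virtio spec
 * requires for split rings.
 */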
static int
virtio_dev_split_get_avail_reqs(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
				uint16_t *reqs, uint16_t max_reqs)
{
	uint16_t count, i, avail_idx, last_idx;

	last_idx = vq->last_avail_idx;
	avail_idx = vq->avail.avail->idx;

	spdk_smp_rmb();

	count = avail_idx - last_idx;
	if (count == 0) {
		return 0;
	}

	count = spdk_min(count, max_reqs);
	vq->last_avail_idx += count;

	for (i = 0; i < count; i++) {
		reqs[i] = vq->avail.avail->ring[(last_idx + i) & (vq->qsize - 1)];
	}

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "AVAIL: vq %u last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
		      vq->id, last_idx, avail_idx, count);

	return count;
}

static int
virtio_vring_split_desc_get_next(struct vring_desc **desc,
				 struct vring_desc *desc_table,
				 uint32_t desc_table_size)
{
	struct vring_desc *old_desc = *desc;
	uint16_t next_idx;

	if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
		*desc = NULL;
		return 0;
	}

	next_idx = old_desc->next;
	if (spdk_unlikely(next_idx >= desc_table_size)) {
		*desc = NULL;
		return -1;
	}

	*desc = &desc_table[next_idx];
	return 0;
}

static inline void *
virtio_vring_desc_to_iov(struct vfu_virtio_dev *dev, struct vring_desc *desc,
			 dma_sg_t *sg, struct iovec *iov)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;

	return spdk_vfu_map_one(virtio_endpoint->endpoint, desc->addr, desc->len,
				sg, iov, PROT_READ | PROT_WRITE);
}

static int
virtio_split_vring_get_desc(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq, uint16_t desc_idx,
			    struct vring_desc **desc, struct vring_desc **desc_table,
			    uint32_t *desc_table_size,
			    dma_sg_t *sg, struct iovec *iov)
{
	*desc = &vq->desc.desc[desc_idx];

	if (virtio_vring_split_desc_is_indirect(*desc)) {
		*desc_table_size = (*desc)->len / sizeof(struct vring_desc);
		*desc_table = virtio_vring_desc_to_iov(dev, *desc, sg, iov);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -EINVAL;
		}
		return 0;
	}

	*desc_table = vq->desc.desc;
	*desc_table_size = vq->qsize;

	return 0;
}

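/*
 * req->sg is a raw byte array sized at allocation time to hold one
 * dma_sg_t per IOV; dma_sg_t is opaque to callers and its size is only
 * known at runtime via dma_sg_size(), so indexing has to be done with
 * explicit byte arithmetic rather than normal array subscripting.
 */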
static inline dma_sg_t *
virtio_req_to_sg_t(struct vfu_virtio_req *req, uint32_t iovcnt)
{
	return (dma_sg_t *)(req->sg + iovcnt * dma_sg_size());
}

static inline struct vfu_virtio_req *
vfu_virtio_dev_get_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_req *req;

	req = STAILQ_FIRST(&vq->free_reqs);
	if (req == NULL) {
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&vq->free_reqs, link);

	req->iovcnt = 0;
	req->used_len = 0;
	req->payload_size = 0;
	req->req_idx = 0;
	req->buffer_id = 0;
	req->num_descs = 0;

	return req;
}

void
vfu_virtio_dev_put_req(struct vfu_virtio_req *req)
{
	struct vfu_virtio_dev *dev = req->dev;
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);

	if (req->indirect_iov->iov_base) {
		vfu_sgl_put(vfu_ctx, req->indirect_sg, req->indirect_iov, 1);
		req->indirect_iov->iov_base = NULL;
		req->indirect_iov->iov_len = 0;
	}

	if (req->iovcnt) {
		vfu_sgl_put(vfu_ctx, virtio_req_to_sg_t(req, 0), req->iovs, req->iovcnt);
		req->iovcnt = 0;
	}

	STAILQ_INSERT_HEAD(&req->vq->free_reqs, req, link);
}

void
vfu_virtio_finish_req(struct vfu_virtio_req *req)
{
	struct vfu_virtio_dev *dev = req->dev;
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;

	assert(virtio_endpoint->io_outstanding);
	virtio_endpoint->io_outstanding--;

	if (!virtio_guest_has_feature(req->dev, VIRTIO_F_RING_PACKED)) {
		virtio_vq_used_ring_split_enqueue(req->vq, req->req_idx, req->used_len);
	} else {
		virtio_vq_used_ring_packed_enqueue(req->vq, req->buffer_id, req->num_descs, req->used_len);
	}

	vfu_virtio_dev_put_req(req);
}

static inline void
vfu_virtio_dev_free_reqs(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_dev *dev)
{
	struct vfu_virtio_req *req;
	struct vfu_virtio_vq *vq;
	uint32_t i;

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		while (!STAILQ_EMPTY(&vq->free_reqs)) {
			req = STAILQ_FIRST(&vq->free_reqs);
			STAILQ_REMOVE_HEAD(&vq->free_reqs, link);
			vfu_virtio_vq_free_req(virtio_endpoint, vq, req);
		}
	}
}

static int
virtio_dev_split_iovs_setup(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
			    uint16_t desc_idx, struct vfu_virtio_req *req)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size, len = 0;
	uint32_t desc_handled_cnt = 0;
	int rc;

	rc = virtio_split_vring_get_desc(dev, vq, desc_idx, &desc,
					 &desc_table, &desc_table_size,
					 req->indirect_sg, req->indirect_iov);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Invalid descriptor at index %"PRIu16".\n", desc_idx);
		return rc;
	}

	assert(req->iovcnt == 0);

	while (true) {
		if (spdk_unlikely(!virtio_vring_desc_to_iov(dev, desc, virtio_req_to_sg_t(req, req->iovcnt),
							    &req->iovs[req->iovcnt]))) {
			return -EINVAL;
		}
		req->desc_writeable[req->iovcnt] = false;
		if (virtio_vring_split_desc_is_wr(desc)) {
			req->desc_writeable[req->iovcnt] = true;
		}

		req->iovcnt++;
		len += desc->len;

		rc = virtio_vring_split_desc_get_next(&desc, desc_table, desc_table_size);
		if (spdk_unlikely(rc)) {
			return rc;
		} else if (desc == NULL) {
			break;
		}

		desc_handled_cnt++;
		if (spdk_unlikely(desc_handled_cnt > desc_table_size)) {
			return -EINVAL;
		}
	}

	req->payload_size = len;

	return 0;
}

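/*
 * Completion on a split ring is a two-step publish: fill in the used
 * element first, then make the new used->idx visible. The spdk_smp_wmb()
 * between the two steps keeps the driver from observing the index update
 * before the element it describes, and the volatile store prevents the
 * compiler from tearing or reordering the final write.
 */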
void
virtio_vq_used_ring_split_enqueue(struct vfu_virtio_vq *vq, uint16_t req_idx, uint32_t used_len)
{
	uint16_t last_idx = vq->last_used_idx & (vq->qsize - 1);

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "Queue %u - USED RING: last_idx=%"PRIu16" req_idx=%"PRIu16" used_len=%"PRIu32"\n",
		      vq->id, last_idx, req_idx, used_len);

	vq->used.used->ring[last_idx].id = req_idx;
	vq->used.used->ring[last_idx].len = used_len;
	vq->last_used_idx++;

	spdk_smp_wmb();

	*(volatile uint16_t *)&vq->used.used->idx = vq->last_used_idx;

	vq->used_req_cnt++;
}

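/*
 * Packed rings have no separate used ring; a descriptor is marked used in
 * place by setting its F_AVAIL/F_USED flag bits to match the device-side
 * wrap counter (used_phase). For example, with used_phase = 1 both bits
 * are set; after last_used_idx walks past qsize the phase flips to 0 and
 * "used" is then signalled by both bits being clear.
 */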
void
virtio_vq_used_ring_packed_enqueue(struct vfu_virtio_vq *vq, uint16_t buffer_id, uint32_t num_descs,
				   uint32_t used_len)
{
	struct vring_packed_desc *desc = &vq->desc.desc_packed[vq->last_used_idx];

	SPDK_DEBUGLOG(vfu_virtio_io,
		      "Queue %u - USED RING: buffer_id=%"PRIu16" num_descs=%u used_len=%"PRIu32"\n",
		      vq->id, buffer_id, num_descs, used_len);

	if (spdk_unlikely(virtio_vring_packed_is_used(desc, vq->packed.used_phase))) {
		SPDK_ERRLOG("descriptor has already been used\n");
		return;
	}

	/* In a used descriptor, addr is unused and len specifies the buffer
	 * length that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = used_len;

	/* This bit specifies whether any data has been written by the device */
	if (used_len != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (vq->packed.used_phase) {
		desc->flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
		desc->flags |= (1 << VRING_PACKED_DESC_F_USED);
	} else {
		desc->flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
		desc->flags &= ~(1 << VRING_PACKED_DESC_F_USED);
	}

	vq->last_used_idx += num_descs;
	if (vq->last_used_idx >= vq->qsize) {
		vq->last_used_idx -= vq->qsize;
		vq->packed.used_phase = !vq->packed.used_phase;
	}

	vq->used_req_cnt++;
}

static int
vfu_virtio_vq_post_irq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);

	vq->used_req_cnt = 0;

	if (spdk_vfu_endpoint_msix_enabled(virtio_endpoint->endpoint)) {
		SPDK_DEBUGLOG(vfu_virtio_io, "%s: Queue %u post MSIX IV %u\n",
			      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
			      vq->id, vq->vector);
		return vfu_irq_trigger(vfu_ctx, vq->vector);
	} else {
		if (!spdk_vfu_endpoint_intx_enabled(virtio_endpoint->endpoint)) {
			SPDK_DEBUGLOG(vfu_virtio_io, "%s: IRQ disabled\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
			return 0;
		}

		SPDK_DEBUGLOG(vfu_virtio_io, "%s: Queue %u post ISR\n",
			      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), vq->id);
		dev->cfg.isr = 1;
		return vfu_irq_trigger(vfu_ctx, 0);
	}
}

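/*
 * Interrupt coalescing converts the configured delay from microseconds to
 * TSC ticks: delay_us * ticks_per_second / 1e6. As an illustration, a
 * 50 us delay on a host whose spdk_get_ticks_hz() reports 2.4 GHz spaces
 * interrupts at least 120,000 ticks apart; completions that land inside
 * the window are signalled by a later flush.
 */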
void
vfu_virtio_vq_flush_irq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	uint32_t delay_us;

	if (vq->used_req_cnt == 0) {
		return;
	}

	/* No need to notify client */
	if (virtio_queue_event_is_suppressed(dev, vq)) {
		return;
	}

	/* Interrupt coalescing disabled */
	if (!virtio_endpoint->coalescing_delay_us) {
		vfu_virtio_vq_post_irq(dev, vq);
		return;
	}

	/* No need for event right now */
	if (spdk_get_ticks() < vq->next_event_time) {
		return;
	}

	vfu_virtio_vq_post_irq(dev, vq);

	delay_us = virtio_endpoint->coalescing_delay_us;
	vq->next_event_time = spdk_get_ticks() + delay_us * spdk_get_ticks_hz() / (1000000ULL);
}

int
vfu_virtio_dev_process_split_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vfu_virtio_req *req;
	uint16_t reqs_idx[VIRTIO_DEV_VRING_MAX_REQS];
	uint16_t reqs_cnt, i;
	int ret;

	reqs_cnt = virtio_dev_split_get_avail_reqs(dev, vq, reqs_idx, VIRTIO_DEV_VRING_MAX_REQS);
	if (!reqs_cnt) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: got %u descriptors\n", dev->name, reqs_cnt);

	for (i = 0; i < reqs_cnt; i++) {
		req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
		if (spdk_unlikely(!req)) {
			SPDK_ERRLOG("Failed to get request\n");
			/* TODO: address the error case */
			return -EIO;
		}

		req->req_idx = reqs_idx[i];
		ret = virtio_dev_split_iovs_setup(dev, vq, req->req_idx, req);
		if (spdk_unlikely(ret)) {
			/* let the device respond to this error */
			SPDK_ERRLOG("Split vring setup failed with index %u\n", i);
		}

		assert(virtio_endpoint->virtio_ops.exec_request);
		virtio_endpoint->io_outstanding++;
		virtio_endpoint->virtio_ops.exec_request(virtio_endpoint, vq, req);
	}

	return i;
}

struct vfu_virtio_req *
virito_dev_split_ring_get_next_avail_req(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vfu_virtio_req *req;
	uint16_t reqs_idx[VIRTIO_DEV_VRING_MAX_REQS];
	uint16_t reqs_cnt;
	int ret;

	reqs_cnt = virtio_dev_split_get_avail_reqs(dev, vq, reqs_idx, 1);
	if (!reqs_cnt) {
		return NULL;
	}
	assert(reqs_cnt == 1);

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: got 1 descriptor\n", dev->name);

	req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
	if (!req) {
		SPDK_ERRLOG("Failed to get request\n");
		return NULL;
	}

	req->req_idx = reqs_idx[0];
	ret = virtio_dev_split_iovs_setup(dev, vq, req->req_idx, req);
	if (ret) {
		SPDK_ERRLOG("Split vring setup failed\n");
		vfu_virtio_dev_put_req(req);
		return NULL;
	}

	return req;
}

static inline void *
virtio_vring_packed_desc_to_iov(struct vfu_virtio_dev *dev, struct vring_packed_desc *desc,
				dma_sg_t *sg, struct iovec *iov)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;

	return spdk_vfu_map_one(virtio_endpoint->endpoint, desc->addr, desc->len,
				sg, iov, PROT_READ | PROT_WRITE);
}

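/*
 * An indirect packed descriptor points at a separate guest-physical array
 * of struct vring_packed_desc, so the number of entries is simply the
 * descriptor's len divided by the element size: a 256-byte indirect table
 * holds 256 / 16 = 16 descriptors. Direct chains instead continue in the
 * ring itself via VRING_DESC_F_NEXT.
 */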
static int
virtio_dev_packed_iovs_setup(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq,
			     uint16_t last_avail_idx,
			     struct vring_packed_desc *current_desc, struct vfu_virtio_req *req)
{
	struct vring_packed_desc *desc, *desc_table = NULL;
	uint16_t new_idx, num_descs, desc_table_size = 0;
	uint32_t len = 0;

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: last avail idx %u, req %p\n", dev->name, last_avail_idx, req);

	desc = NULL;
	num_descs = 1;
	if (virtio_vring_packed_desc_is_indirect(current_desc)) {
		req->buffer_id = current_desc->id;
		desc_table = virtio_vring_packed_desc_to_iov(dev, current_desc, req->indirect_sg,
				req->indirect_iov);
		if (spdk_unlikely(desc_table == NULL)) {
			SPDK_ERRLOG("Map Indirect Desc to IOV failed\n");
			return -EINVAL;
		}
		desc_table_size = current_desc->len / sizeof(struct vring_packed_desc);
		desc = desc_table;
		SPDK_DEBUGLOG(vfu_virtio_io, "%s: indirect desc %p, desc size %u, req %p\n",
			      dev->name, desc_table, desc_table_size, req);
	} else {
		desc = current_desc;
	}

	assert(req->iovcnt == 0);
	/* Map descs to IOVs */
	new_idx = last_avail_idx;
	while (1) {
		assert(desc != NULL);
		if (spdk_unlikely(req->iovcnt == VIRTIO_DEV_MAX_IOVS)) {
			SPDK_ERRLOG("Max IOVs in request reached (iovcnt = %d).\n", req->iovcnt);
			return -EINVAL;
		}

		if (spdk_unlikely(!virtio_vring_packed_desc_to_iov(dev, desc, virtio_req_to_sg_t(req, req->iovcnt),
				  &req->iovs[req->iovcnt]))) {
			SPDK_ERRLOG("Map Desc to IOV failed (iovcnt = %d).\n", req->iovcnt);
			return -EINVAL;
		}
		req->desc_writeable[req->iovcnt] = false;
		if (virtio_vring_packed_desc_is_wr(desc)) {
			req->desc_writeable[req->iovcnt] = true;
		}

		req->iovcnt++;
		len += desc->len;

		/* get next desc */
		if (desc_table) {
			if (req->iovcnt < desc_table_size) {
				desc = &desc_table[req->iovcnt];
			} else {
				desc = NULL;
			}
		} else {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				req->buffer_id = desc->id;
				desc = NULL;
			} else {
				new_idx = (new_idx + 1) % vq->qsize;
				desc = &vq->desc.desc_packed[new_idx];
				num_descs++;
				req->buffer_id = desc->id;
			}
		}

		if (desc == NULL) {
			break;
		}
	}

	req->num_descs = num_descs;
	vq->last_avail_idx = (new_idx + 1) % vq->qsize;
	if (vq->last_avail_idx < last_avail_idx) {
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	req->payload_size = len;

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: req %p, iovcnt %u, num_descs %u\n",
		      dev->name, req, req->iovcnt, num_descs);
	return 0;
}

int
vfu_virtio_dev_process_packed_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vring_packed_desc *desc;
	int ret;
	struct vfu_virtio_req *req;
	uint16_t i, max_reqs;

	max_reqs = VIRTIO_DEV_VRING_MAX_REQS;
	for (i = 0; i < max_reqs; i++) {
		desc = &vq->desc.desc_packed[vq->last_avail_idx];
		if (!virtio_vring_packed_is_avail(desc, vq->packed.avail_phase)) {
			return i;
		}

		req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
		if (spdk_unlikely(!req)) {
			SPDK_ERRLOG("Failed to get request\n");
			/* TODO: address the error case */
			assert(false);
			return -EIO;
		}

		ret = virtio_dev_packed_iovs_setup(dev, vq, vq->last_avail_idx, desc, req);
		if (spdk_unlikely(ret)) {
			/* let the device respond to the error */
			SPDK_ERRLOG("virtio_dev_packed_iovs_setup failed\n");
		}

		assert(virtio_endpoint->virtio_ops.exec_request);
		virtio_endpoint->io_outstanding++;
		virtio_endpoint->virtio_ops.exec_request(virtio_endpoint, vq, req);
	}

	return i;
}

struct vfu_virtio_req *
virito_dev_packed_ring_get_next_avail_req(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	struct vring_packed_desc *desc;
	int ret;
	struct vfu_virtio_req *req;

	desc = &vq->desc.desc_packed[vq->last_avail_idx];
	if (!virtio_vring_packed_is_avail(desc, vq->packed.avail_phase)) {
		return NULL;
	}

	SPDK_DEBUGLOG(vfu_virtio_io, "%s: got 1 descriptor\n", dev->name);

	req = vfu_virtio_dev_get_req(virtio_endpoint, vq);
	if (!req) {
		SPDK_ERRLOG("Failed to get request\n");
		return NULL;
	}

	ret = virtio_dev_packed_iovs_setup(dev, vq, vq->last_avail_idx, desc, req);
	if (ret) {
		SPDK_ERRLOG("virtio_dev_packed_iovs_setup failed\n");
		vfu_virtio_dev_put_req(req);
		return NULL;
	}

	return req;
}

static int
virtio_vfu_pci_common_cfg(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
			  size_t count, loff_t pos, bool is_write)
{
	struct vfu_virtio_dev *dev = virtio_endpoint->dev;
	uint32_t offset, value = 0;
	int ret;

	assert(count <= 4);
	offset = pos - VIRTIO_PCI_COMMON_CFG_OFFSET;

	if (is_write) {
		memcpy(&value, buf, count);
		switch (offset) {
		case VIRTIO_PCI_COMMON_DFSELECT:
			dev->cfg.host_feature_select = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_DFSELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_GFSELECT:
			dev->cfg.guest_feature_select = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GFSELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_GF:
			assert(dev->cfg.guest_feature_select <= 1);
			if (dev->cfg.guest_feature_select) {
				dev->cfg.guest_feat_hi = value;
				SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GF_HI with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			} else {
				dev->cfg.guest_feat_lo = value;
				SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_GF_LO with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			}

			ret = virtio_dev_set_features(dev,
						      (((uint64_t)dev->cfg.guest_feat_hi << 32) | dev->cfg.guest_feat_lo));
			if (ret) {
				return ret;
			}
			break;
		case VIRTIO_PCI_COMMON_MSIX:
			dev->cfg.msix_config = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_MSIX with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_STATUS:
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_STATUS with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			ret = virtio_dev_set_status(dev, value);
			if (ret) {
				return ret;
			}
			break;
		case VIRTIO_PCI_COMMON_Q_SELECT:
			if (value < VIRTIO_DEV_MAX_VQS) {
				dev->cfg.queue_select = value;
			}
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_SELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_Q_SIZE:
			dev->vqs[dev->cfg.queue_select].qsize = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_SIZE with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_Q_MSIX:
			dev->vqs[dev->cfg.queue_select].vector = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_MSIX with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_Q_ENABLE:
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE PCI_COMMON_Q_ENABLE with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			if (value == 1) {
				ret = virtio_dev_enable_vq(dev, dev->cfg.queue_select);
				if (ret) {
					return ret;
				}
			} else {
				ret = virtio_dev_disable_vq(dev, dev->cfg.queue_select);
				if (ret) {
					return ret;
				}
			}
			break;
		case VIRTIO_PCI_COMMON_Q_DESCLO:
			dev->vqs[dev->cfg.queue_select].desc_lo = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_DESCLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_DESCHI:
			dev->vqs[dev->cfg.queue_select].desc_hi = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_DESCHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_AVAILLO:
			dev->vqs[dev->cfg.queue_select].avail_lo = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_AVAILLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_AVAILHI:
			dev->vqs[dev->cfg.queue_select].avail_hi = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_AVAILHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_USEDLO:
			dev->vqs[dev->cfg.queue_select].used_lo = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_USEDLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_USEDHI:
			dev->vqs[dev->cfg.queue_select].used_hi = value;
			SPDK_DEBUGLOG(vfu_virtio, "%s: WRITE queue %u PCI_COMMON_Q_USEDHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;

		default:
			SPDK_ERRLOG("%s: WRITE UNSUPPORTED offset 0x%x\n",
				    spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), offset);
			errno = EIO;
			return -1;
		}
	} else {
		switch (offset) {
		case VIRTIO_PCI_COMMON_DFSELECT:
			value = dev->cfg.host_feature_select;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DFSELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_DF:
			assert(dev->cfg.host_feature_select <= 1);
			if (dev->cfg.host_feature_select) {
				value = dev->host_features >> 32;
				SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DF_HI with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			} else {
				value = dev->host_features;
				SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_DF_LO with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			}
			break;
		case VIRTIO_PCI_COMMON_GFSELECT:
			value = dev->cfg.guest_feature_select;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GFSELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_GF:
			assert(dev->cfg.guest_feature_select <= 1);
			if (dev->cfg.guest_feature_select) {
				value = dev->cfg.guest_feat_hi;
				SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GF_HI with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			} else {
				value = dev->cfg.guest_feat_lo;
				SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_GF_LO with 0x%x\n",
					      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
					      value);
			}
			break;
		case VIRTIO_PCI_COMMON_MSIX:
			value = dev->cfg.msix_config;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_MSIX with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_NUMQ:
			value = dev->num_queues;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_NUMQ with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_STATUS:
			value = dev->cfg.device_status;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_STATUS with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_CFGGENERATION:
			value = dev->cfg.config_generation;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_CFGGENERATION with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
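		/*
		 * The notify offset reported here is the queue index itself;
		 * combined with notify_off_multiplier (4, see notify_cap at
		 * the bottom of this file) each queue gets a distinct
		 * doorbell slot inside the BAR4 notifications region.
		 */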
		case VIRTIO_PCI_COMMON_Q_NOFF:
			value = dev->cfg.queue_select;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_Q_NOFF with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_Q_SELECT:
			value = dev->cfg.queue_select;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ PCI_COMMON_Q_SELECT with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      value);
			break;
		case VIRTIO_PCI_COMMON_Q_SIZE:
			value = dev->vqs[dev->cfg.queue_select].qsize;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_SIZE with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_MSIX:
			value = dev->vqs[dev->cfg.queue_select].vector;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_MSIX with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
				      dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_ENABLE:
			value = dev->vqs[dev->cfg.queue_select].enabled;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_ENABLE with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_DESCLO:
			value = dev->vqs[dev->cfg.queue_select].desc_lo;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_DESCLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_DESCHI:
			value = dev->vqs[dev->cfg.queue_select].desc_hi;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_DESCHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_AVAILLO:
			value = dev->vqs[dev->cfg.queue_select].avail_lo;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_AVAILLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_AVAILHI:
			value = dev->vqs[dev->cfg.queue_select].avail_hi;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_AVAILHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_USEDLO:
			value = dev->vqs[dev->cfg.queue_select].used_lo;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_USEDLO with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		case VIRTIO_PCI_COMMON_Q_USEDHI:
			value = dev->vqs[dev->cfg.queue_select].used_hi;
			SPDK_DEBUGLOG(vfu_virtio, "%s: READ queue %u PCI_COMMON_Q_USEDHI with 0x%x\n",
				      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), dev->cfg.queue_select, value);
			break;
		default:
			SPDK_ERRLOG("%s: READ UNSUPPORTED offset 0x%x\n",
				    spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint), offset);
			errno = EIO;
			return -1;
		}
		memcpy(buf, &value, count);
	}

	return count;
}

static int
virtio_vfu_device_specific_cfg(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
			       size_t count, loff_t pos, bool is_write)
{
	loff_t offset;
	int ret = -1;

	assert(count <= 8);
	offset = pos - VIRTIO_PCI_SPECIFIC_CFG_OFFSET;
	if (!is_write) {
		if (virtio_endpoint->virtio_ops.get_config) {
			ret = virtio_endpoint->virtio_ops.get_config(virtio_endpoint, buf, offset, count);
		}
	} else {
		if (virtio_endpoint->virtio_ops.set_config) {
			ret = virtio_endpoint->virtio_ops.set_config(virtio_endpoint, buf, offset, count);
		}
	}

	if (ret < 0) {
		return ret;
	}

	return count;
}

static int
virtio_vfu_pci_isr(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
		   size_t count, bool is_write)
{
	uint8_t *isr;

	if (count != 1) {
		SPDK_ERRLOG("ISR register is 1 byte\n");
		errno = EIO;
		return -1;
	}

	isr = buf;

	if (!is_write) {
		SPDK_DEBUGLOG(vfu_virtio, "READ PCI ISR\n");
		/* Read-Acknowledge Clear */
		*isr = virtio_endpoint->dev->cfg.isr;
		virtio_endpoint->dev->cfg.isr = 0;
	} else {
		SPDK_ERRLOG("ISR register is RO\n");
		errno = EIO;
		return -1;
	}

	return count;
}

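/*
 * BAR4 layout, as advertised by the vendor capabilities at the bottom of
 * this file: the virtio common configuration, the ISR byte, the
 * device-specific configuration and the notifications (doorbell) area
 * each live at their own fixed offset. Only the first three are served
 * via vfio-user messages here; notifications are a sparse-mmap region,
 * so the guest writes doorbells directly to shared memory.
 */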
static ssize_t
virtio_vfu_access_bar4(vfu_ctx_t *vfu_ctx, char *buf, size_t count,
		       loff_t pos,
		       bool is_write)
{
	struct spdk_vfu_endpoint *endpoint = vfu_get_private(vfu_ctx);
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	uint64_t start, end;

	start = pos;
	end = start + count;
	SPDK_DEBUGLOG(vfu_virtio, "%s: %s bar4 0x%"PRIX64"-0x%"PRIX64", len = %lu\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      is_write ? "write" : "read", start, end - 1, count);

	if (end < VIRTIO_PCI_COMMON_CFG_OFFSET + VIRTIO_PCI_COMMON_CFG_LENGTH) {
		/* virtio PCI common configuration */
		return virtio_vfu_pci_common_cfg(virtio_endpoint, buf, count, pos, is_write);
	} else if (start >= VIRTIO_PCI_ISR_ACCESS_OFFSET &&
		   end < VIRTIO_PCI_ISR_ACCESS_OFFSET + VIRTIO_PCI_ISR_ACCESS_LENGTH) {
		/* ISR access */
		return virtio_vfu_pci_isr(virtio_endpoint, buf, count, is_write);
	} else if (start >= VIRTIO_PCI_SPECIFIC_CFG_OFFSET &&
		   end < VIRTIO_PCI_SPECIFIC_CFG_OFFSET + VIRTIO_PCI_SPECIFIC_CFG_LENGTH) {
		/* Device specific configuration */
		return virtio_vfu_device_specific_cfg(virtio_endpoint, buf, count, pos, is_write);
	} else if (start >= VIRTIO_PCI_NOTIFICATIONS_OFFSET &&
		   end < VIRTIO_PCI_NOTIFICATIONS_OFFSET + VIRTIO_PCI_NOTIFICATIONS_LENGTH) {
		/* Notifications */
		/* Sparse mmap region by default, there are no MMIO R/W messages */
		assert(false);
		return count;
	} else {
		assert(false);
	}

	return 0;
}

int
vfu_virtio_post_memory_add(struct spdk_vfu_endpoint *endpoint, void *map_start, void *map_end)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct vfu_virtio_dev *dev = virtio_endpoint->dev;
	uint32_t i;

	if (!dev) {
		return 0;
	}

	for (i = 0; i < dev->num_queues; i++) {
		/* Try to remap VQs if necessary */
		virtio_dev_map_vq(dev, &dev->vqs[i]);
	}

	return 0;
}

int
vfu_virtio_pre_memory_remove(struct spdk_vfu_endpoint *endpoint, void *map_start, void *map_end)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);

	if (virtio_endpoint->dev != NULL) {
		vfu_virtio_dev_unmap_vqs(virtio_endpoint->dev, map_start, map_end);
	}

	return 0;
}

int
vfu_virtio_pci_reset_cb(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);

	if (virtio_endpoint->dev) {
		vfu_virtio_dev_stop(virtio_endpoint->dev);
		vfu_virtio_dev_reset(virtio_endpoint->dev);
	}

	return 0;
}

static ssize_t
access_pci_config(vfu_ctx_t *vfu_ctx, char *buf, size_t count, loff_t offset,
		  bool is_write)
{
	struct spdk_vfu_endpoint *endpoint = vfu_get_private(vfu_ctx);
	void *pci_config = spdk_vfu_endpoint_get_pci_config(endpoint);

	SPDK_DEBUGLOG(vfu_virtio,
		      "%s: PCI_CFG %s %#lx-%#lx\n",
		      spdk_vfu_get_endpoint_id(endpoint), is_write ? "write" : "read",
		      offset, offset + count);

	if (is_write) {
		SPDK_ERRLOG("write %#lx-%#lx not supported\n",
			    offset, offset + count);
		errno = EINVAL;
		return -1;
	}

	if (offset + count > 0x1000) {
		SPDK_ERRLOG("access past end of extended PCI configuration space, want=%ld+%ld, max=%d\n",
			    offset, count, 0x1000);
		errno = ERANGE;
		return -1;
	}

	memcpy(buf, ((unsigned char *)pci_config) + offset, count);
	return count;
}

static int
vfu_virtio_dev_start(struct vfu_virtio_dev *dev)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	int ret = 0;

	SPDK_DEBUGLOG(vfu_virtio, "start %s\n", dev->name);

	if (virtio_dev_is_started(dev)) {
		SPDK_ERRLOG("Device %s is already started\n", dev->name);
		return -EFAULT;
	}

	if (virtio_endpoint->virtio_ops.start_device) {
		virtio_endpoint->io_outstanding = 0;
		ret = virtio_endpoint->virtio_ops.start_device(virtio_endpoint);
	}

	SPDK_DEBUGLOG(vfu_virtio, "started %s, ret %d\n", dev->name, ret);

	return ret;
}

static int
vfu_virtio_dev_stop(struct vfu_virtio_dev *dev)
{
	struct vfu_virtio_endpoint *virtio_endpoint = dev->virtio_endpoint;
	int ret = 0;

	SPDK_DEBUGLOG(vfu_virtio, "stop %s\n", dev->name);

	if (!virtio_dev_is_started(dev)) {
		SPDK_DEBUGLOG(vfu_virtio, "%s isn't started\n", dev->name);
		return 0;
	}

	if (virtio_endpoint->virtio_ops.stop_device) {
		ret = virtio_endpoint->virtio_ops.stop_device(virtio_endpoint);
		assert(ret == 0);
	}

	/* Unmap all VQs */
	vfu_virtio_dev_unmap_vqs(dev, NULL, NULL);

	return ret;
}

int
vfu_virtio_detach_device(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct vfu_virtio_dev *dev = virtio_endpoint->dev;

	if (virtio_endpoint->dev == NULL) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio, "detach device %s\n", dev->name);

	vfu_virtio_dev_stop(dev);
	vfu_virtio_dev_free_reqs(virtio_endpoint, dev);
	virtio_endpoint->dev = NULL;
	free(dev);

	return 0;
}

int
vfu_virtio_attach_device(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	uint64_t supported_features = 0;
	struct vfu_virtio_dev *dev;
	struct vfu_virtio_vq *vq;
	struct vfu_virtio_req *req;
	uint32_t i, j;
	int ret = 0;

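	/*
	 * The device struct is allocated with a trailing byte array (dev->sg)
	 * large enough for three dma_sg_t entries per queue: one each for the
	 * avail, used and descriptor ring mappings carved out below.
	 */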
	dev = calloc(1, sizeof(*dev) + virtio_endpoint->num_queues * 3 * dma_sg_size());
	if (dev == NULL) {
		return -ENOMEM;
	}

	dev->num_queues = virtio_endpoint->num_queues;
	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		vq->id = i;
		vq->qsize = virtio_endpoint->qsize;
		vq->avail.sg = (dma_sg_t *)(dev->sg + i * dma_sg_size() * 3);
		vq->used.sg = (dma_sg_t *)((uint8_t *)vq->avail.sg + dma_sg_size());
		vq->desc.sg = (dma_sg_t *)((uint8_t *)vq->used.sg + dma_sg_size());

		STAILQ_INIT(&vq->free_reqs);
		for (j = 0; j <= vq->qsize; j++) {
			req = vfu_virtio_vq_alloc_req(virtio_endpoint, vq);
			if (!req) {
				SPDK_ERRLOG("Failed to allocate request\n");
				ret = -ENOMEM;
				goto out;
			}
			req->indirect_iov = &req->iovs[VIRTIO_DEV_MAX_IOVS];
			req->indirect_sg = virtio_req_to_sg_t(req, VIRTIO_DEV_MAX_IOVS);
			req->dev = dev;
			req->vq = vq;
			STAILQ_INSERT_TAIL(&vq->free_reqs, req, link);
		}
	}

	if (virtio_endpoint->virtio_ops.get_device_features) {
		supported_features = virtio_endpoint->virtio_ops.get_device_features(virtio_endpoint);
	}
	dev->host_features = supported_features;

	snprintf(dev->name, SPDK_VFU_MAX_NAME_LEN, "%s",
		 spdk_vfu_get_endpoint_name(virtio_endpoint->endpoint));
	virtio_endpoint->dev = dev;
	dev->virtio_endpoint = virtio_endpoint;
	virtio_endpoint->thread = spdk_get_thread();
	return 0;

out:
	vfu_virtio_dev_free_reqs(virtio_endpoint, dev);
	return ret;
}

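/*
 * BAR4 is backed by a regular file so that its notifications region can be
 * exported to the vfio-user client as a sparse mmap: the file is created,
 * immediately unlinked (so it disappears on close), truncated to the BAR4
 * length, and mmap()ed locally at the notifications offset. The same fd is
 * later handed to the client via vfu_virtio_get_device_info().
 */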
int
vfu_virtio_endpoint_setup(struct vfu_virtio_endpoint *virtio_endpoint,
			  struct spdk_vfu_endpoint *endpoint,
			  char *basename, const char *endpoint_name,
			  struct vfu_virtio_ops *ops)
{
	char path[PATH_MAX] = "";
	int ret;

	if (!ops) {
		return -EINVAL;
	}

	ret = snprintf(path, PATH_MAX, "%s%s_bar4", basename, endpoint_name);
	if (ret < 0 || ret >= PATH_MAX) {
		SPDK_ERRLOG("%s: failed to build BAR4 file path: %s.\n", basename, spdk_strerror(errno));
		return -EINVAL;
	}

	ret = open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (ret == -1) {
		SPDK_ERRLOG("%s: failed to open device memory at %s.\n",
			    path, spdk_strerror(errno));
		return ret;
	}
	unlink(path);

	virtio_endpoint->devmem_fd = ret;
	ret = ftruncate(virtio_endpoint->devmem_fd, VIRTIO_PCI_BAR4_LENGTH);
	if (ret != 0) {
		SPDK_ERRLOG("%s: failed to truncate file %s.\n", path,
			    spdk_strerror(errno));
		close(virtio_endpoint->devmem_fd);
		return ret;
	}

	virtio_endpoint->doorbells = mmap(NULL, VIRTIO_PCI_NOTIFICATIONS_LENGTH, PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  virtio_endpoint->devmem_fd, VIRTIO_PCI_NOTIFICATIONS_OFFSET);
	if (virtio_endpoint->doorbells == MAP_FAILED) {
		SPDK_ERRLOG("%s: failed to mmap file %s.\n", path, spdk_strerror(errno));
		close(virtio_endpoint->devmem_fd);
		return -EFAULT;
	}
	virtio_endpoint->endpoint = endpoint;
	virtio_endpoint->virtio_ops = *ops;
	virtio_endpoint->num_queues = VIRTIO_DEV_MAX_VQS;
	virtio_endpoint->qsize = VIRTIO_VQ_DEFAULT_SIZE;

	SPDK_DEBUGLOG(vfu_virtio, "mmap file %s, devmem_fd %d\n", path, virtio_endpoint->devmem_fd);
	return 0;
}

int
vfu_virtio_endpoint_destruct(struct vfu_virtio_endpoint *virtio_endpoint)
{
	if (virtio_endpoint->doorbells) {
		munmap((void *)virtio_endpoint->doorbells, VIRTIO_PCI_NOTIFICATIONS_LENGTH);
	}

	if (virtio_endpoint->devmem_fd) {
		close(virtio_endpoint->devmem_fd);
	}

	return 0;
}

static int
vfu_virtio_quiesce_poll(void *ctx)
{
	struct vfu_virtio_endpoint *virtio_endpoint = ctx;
	vfu_ctx_t *vfu_ctx = spdk_vfu_get_vfu_ctx(virtio_endpoint->endpoint);

	if (virtio_endpoint->io_outstanding) {
		return SPDK_POLLER_IDLE;
	}

	spdk_poller_unregister(&virtio_endpoint->quiesce_poller);
	virtio_endpoint->quiesce_in_progress = false;
	vfu_device_quiesced(vfu_ctx, 0);

	return SPDK_POLLER_BUSY;
}

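/*
 * Quiescing is asynchronous: if I/O is still outstanding the callback
 * returns -EBUSY and registers a 10-microsecond poller; once the
 * outstanding count drains to zero the poller calls vfu_device_quiesced()
 * to complete the handshake with libvfio-user.
 */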
int
vfu_virtio_quiesce_cb(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);

	if (virtio_endpoint->quiesce_in_progress) {
		return -EBUSY;
	}

	if (!virtio_endpoint->io_outstanding) {
		return 0;
	}

	virtio_endpoint->quiesce_in_progress = true;
	virtio_endpoint->quiesce_poller = SPDK_POLLER_REGISTER(vfu_virtio_quiesce_poll, virtio_endpoint,
					  10);

	return -EBUSY;
}

static struct spdk_vfu_pci_device vfu_virtio_device_info = {
	.id = {
		.vid = SPDK_PCI_VID_VIRTIO,
		/* Filled in when getting device information */
		.did = 0x0,
		.ssvid = SPDK_PCI_VID_VIRTIO,
		.ssid = 0x0,
	},

	.class = {
		/* 0x01, mass storage controller */
		.bcc = 0x01,
		/* 0x00, SCSI controller */
		.scc = 0x00,
		/* 0x00, SCSI controller - vendor specific interface */
		.pi = 0x00,
	},

	.pmcap = {
		.hdr.id = PCI_CAP_ID_PM,
		.pmcs.nsfrst = 0x1,
	},

	.pxcap = {
		.hdr.id = PCI_CAP_ID_EXP,
		.pxcaps.ver = 0x2,
		.pxdcap = {.rer = 0x1, .flrc = 0x1},
		.pxdcap2.ctds = 0x1,
	},

	.msixcap = {
		.hdr.id = PCI_CAP_ID_MSIX,
		.mxc.ts = VIRTIO_DEV_MAX_VQS - 1,
		.mtab = {.tbir = 0x1, .to = 0x0},
		.mpba = {.pbir = 0x2, .pbao = 0x0},
	},

	.nr_vendor_caps = 4,

	.intr_ipin = 0x1,
	.nr_int_irqs = 0x1,
	.nr_msix_irqs = VIRTIO_DEV_MAX_VQS,

	.regions = {
		/* BAR0 */
		{0},
		/* BAR1 */
		{
			.access_cb = NULL,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
		/* BAR2 */
		{
			.access_cb = NULL,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
		/* BAR3 */
		{0},
		/* BAR4 */
		{
			.access_cb = virtio_vfu_access_bar4,
			.offset = 0,
			.fd = -1,
			.len = VIRTIO_PCI_BAR4_LENGTH,
			.flags = VFU_REGION_FLAG_RW | VFU_REGION_FLAG_MEM,
			.nr_sparse_mmaps = 1,
			.mmaps = {
				{
					.offset = VIRTIO_PCI_NOTIFICATIONS_OFFSET,
					.len = VIRTIO_PCI_NOTIFICATIONS_LENGTH,
				},
			},
		},
		/* BAR5 */
		{0},
		/* BAR6 */
		{0},
		/* ROM */
		{0},
		/* PCI Config */
		{
			.access_cb = access_pci_config,
			.offset = 0,
			.fd = -1,
			.len = 0x1000,
			.flags = VFU_REGION_FLAG_RW,
			.nr_sparse_mmaps = 0,
		},
	},
};

void
vfu_virtio_get_device_info(struct vfu_virtio_endpoint *virtio_endpoint,
			   struct spdk_vfu_pci_device *device_info)
{
	memcpy(device_info, &vfu_virtio_device_info, sizeof(*device_info));

	/* BAR4 Region FD */
	device_info->regions[VFU_PCI_DEV_BAR4_REGION_IDX].fd = virtio_endpoint->devmem_fd;
	SPDK_DEBUGLOG(vfu_virtio, "%s: get device information, fd %d\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      virtio_endpoint->devmem_fd);
}

static struct virtio_pci_cap common_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(common_cap),
	.cfg_type = VIRTIO_PCI_CAP_COMMON_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_COMMON_CFG_OFFSET,
	.length = VIRTIO_PCI_COMMON_CFG_LENGTH,
};

static struct virtio_pci_cap isr_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(isr_cap),
	.cfg_type = VIRTIO_PCI_CAP_ISR_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_ISR_ACCESS_OFFSET,
	.length = VIRTIO_PCI_ISR_ACCESS_LENGTH,
};

static struct virtio_pci_cap dev_cap = {
	.cap_vndr = PCI_CAP_ID_VNDR,
	.cap_len = sizeof(dev_cap),
	.cfg_type = VIRTIO_PCI_CAP_DEVICE_CFG,
	.bar = 4,
	.offset = VIRTIO_PCI_SPECIFIC_CFG_OFFSET,
	.length = VIRTIO_PCI_SPECIFIC_CFG_LENGTH,
};

static struct virtio_pci_notify_cap notify_cap = {
	.cap = {
		.cap_vndr = PCI_CAP_ID_VNDR,
		.cap_len = sizeof(notify_cap),
		.cfg_type = VIRTIO_PCI_CAP_NOTIFY_CFG,
		.bar = 4,
		.offset = VIRTIO_PCI_NOTIFICATIONS_OFFSET,
		.length = VIRTIO_PCI_NOTIFICATIONS_LENGTH,
	},
	.notify_off_multiplier = 4,
};

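/*
 * Returns the four vendor-specific virtio capabilities, one per call,
 * matching nr_vendor_caps = 4 in vfu_virtio_device_info above: common
 * config, ISR, device-specific config and notifications. All of them
 * point into BAR4 at the offsets laid out earlier in this file.
 */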
uint16_t
vfu_virtio_get_vendor_capability(struct spdk_vfu_endpoint *endpoint, char *buf,
				 uint16_t buf_len,
				 uint16_t idx)
{
	uint16_t len;

	SPDK_DEBUGLOG(vfu_virtio, "%s: get vendor capability, idx %u\n",
		      spdk_vfu_get_endpoint_id(endpoint), idx);

	switch (idx) {
	case 0:
		assert(buf_len > sizeof(common_cap));
		memcpy(buf, &common_cap, sizeof(common_cap));
		len = sizeof(common_cap);
		break;
	case 1:
		assert(buf_len > sizeof(isr_cap));
		memcpy(buf, &isr_cap, sizeof(isr_cap));
		len = sizeof(isr_cap);
		break;
	case 2:
		assert(buf_len > sizeof(dev_cap));
		memcpy(buf, &dev_cap, sizeof(dev_cap));
		len = sizeof(dev_cap);
		break;
	case 3:
		assert(buf_len > sizeof(notify_cap));
		memcpy(buf, &notify_cap, sizeof(notify_cap));
		len = sizeof(notify_cap);
		break;
	default:
		return 0;
	}

	return len;
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio)
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_io)