/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

/*
 * virtio-scsi over vfio-user transport
 */
#include <linux/virtio_scsi.h>

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/scsi.h"
#include "spdk/scsi_spec.h"
#include "spdk/pci_ids.h"

#include "vfu_virtio_internal.h"

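/*
 * Feature bits advertised to the guest, per the virtio-scsi specification:
 * INOUT allows a single request to carry both device-readable and
 * device-writable data buffers, HOTPLUG lets the device report target
 * hotplug/hot-unplug through the event queue, and CHANGE lets it report
 * LUN parameter changes (e.g. a capacity change) the same way.
 */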
#define VIRTIO_SCSI_SUPPORTED_FEATURES ((1ULL << VIRTIO_SCSI_F_INOUT) | \
					(1ULL << VIRTIO_SCSI_F_HOTPLUG) | \
					(1ULL << VIRTIO_SCSI_F_CHANGE))

#define VIRTIO_SCSI_CTRLR_MAX_TARGETS (8)

struct virtio_scsi_target {
	struct spdk_scsi_dev *dev;
};

struct virtio_scsi_endpoint {
	struct vfu_virtio_endpoint virtio;

	struct virtio_scsi_config scsi_cfg;
	/* virtio_scsi specific configurations */
	struct virtio_scsi_target targets[VIRTIO_SCSI_CTRLR_MAX_TARGETS];
	/* virtio_scsi SCSI task and IO ring process poller */
	struct spdk_poller *ring_poller;
};

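/*
 * Per-request context carved out of a virtqueue descriptor chain. The unions
 * alias the guest-supplied header/response buffers either as a SCSI command
 * (request queues) or as a control/TMF request (control queue), depending on
 * which virtqueue the request arrived on.
 */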
struct virtio_scsi_req {
	struct spdk_scsi_task scsi;
	union {
		struct virtio_scsi_cmd_req *cmd_req;
		struct virtio_scsi_ctrl_tmf_req *tmf_req;
	};
	union {
		struct virtio_scsi_cmd_resp *cmd_resp;
		struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	};
	struct virtio_scsi_endpoint *endpoint;
	/* Keep req last: virtio_scsi_alloc_req() places the libvfio-user DMA
	 * scatter-gather list right behind this structure.
	 */
	struct vfu_virtio_req req;
};

static inline struct virtio_scsi_endpoint *
to_scsi_endpoint(struct vfu_virtio_endpoint *virtio_endpoint)
{
	return SPDK_CONTAINEROF(virtio_endpoint, struct virtio_scsi_endpoint, virtio);
}

static inline struct virtio_scsi_req *
to_scsi_request(struct vfu_virtio_req *request)
{
	return SPDK_CONTAINEROF(request, struct virtio_scsi_req, req);
}

static void
virtio_scsi_req_finish(struct virtio_scsi_req *scsi_req)
{
	struct vfu_virtio_req *req = &scsi_req->req;

	vfu_virtio_finish_req(req);
}

static int
vfu_virtio_scsi_vring_poll(void *ctx)
{
	struct virtio_scsi_endpoint *scsi_endpoint = ctx;
	struct vfu_virtio_dev *dev = scsi_endpoint->virtio.dev;
	struct vfu_virtio_vq *vq;
	uint32_t i, count = 0;

	if (spdk_unlikely(!virtio_dev_is_started(dev))) {
		return SPDK_POLLER_IDLE;
	}

	if (spdk_unlikely(scsi_endpoint->virtio.quiesce_in_progress)) {
		return SPDK_POLLER_IDLE;
	}

	/* Queue 1 is the event queue; it is only filled via
	 * vfu_virtio_scsi_eventq_enqueue(), so skip it here.
	 */
	for (i = 0; i < dev->num_queues; i++) {
		if (i == 1) {
			continue;
		}

		vq = &dev->vqs[i];
		if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
			continue;
		}

		vfu_virtio_vq_flush_irq(dev, vq);

		if (vq->packed.packed_ring) {
			/* packed vring */
			count += vfu_virtio_dev_process_packed_ring(dev, vq);
		} else {
			/* split vring */
			count += vfu_virtio_dev_process_split_ring(dev, vq);
		}
	}

	return count ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

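/*
 * Post a virtio_scsi_event (hotplug, hot-unplug or parameter change) on the
 * event queue. If the guest has not made an event buffer available, the
 * event is silently dropped.
 */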
static void
vfu_virtio_scsi_eventq_enqueue(struct virtio_scsi_endpoint *scsi_endpoint, uint8_t scsi_target_num,
			       uint32_t event, uint32_t reason)
{
	struct vfu_virtio_dev *dev = scsi_endpoint->virtio.dev;
	struct vfu_virtio_req *req = NULL;
	struct virtio_scsi_req *scsi_req;
	struct virtio_scsi_event *desc_ev;
	struct vfu_virtio_vq *vq;

	assert(dev != NULL);

	if (scsi_target_num >= VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		return;
	}

	if (spdk_unlikely(scsi_endpoint->virtio.quiesce_in_progress)) {
		return;
	}

	/* event queue */
	vq = &dev->vqs[1];
	if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
		return;
	}

	if (vq->packed.packed_ring) {
		/* packed vring */
		req = virito_dev_packed_ring_get_next_avail_req(dev, vq);
	} else {
		/* split vring */
		req = virito_dev_split_ring_get_next_avail_req(dev, vq);
	}

	if (!req) {
		return;
	}
	scsi_req = to_scsi_request(req);
	scsi_req->endpoint = scsi_endpoint;
	/* add 1 for scsi event */
	scsi_endpoint->virtio.io_outstanding++;

	assert(req->iovcnt == 1);
	assert(req->iovs[0].iov_len == sizeof(struct virtio_scsi_event));
	desc_ev = req->iovs[0].iov_base;

	desc_ev->event = event;
	desc_ev->lun[0] = 1;
	desc_ev->lun[1] = scsi_target_num;
	/* virtio LUN id 0 can refer either to the entire device
	 * or to actual LUN 0 (the only one supported by vhost for now)
	 */
	desc_ev->lun[2] = 0 >> 8;
	desc_ev->lun[3] = 0 & 0xFF;
	/* virtio doesn't specify any strict format for LUN id (bytes 2 and 3);
	 * the current implementation follows the Linux kernel sources
	 */
	memset(&desc_ev->lun[4], 0, 4);
	desc_ev->reason = reason;

	req->used_len = sizeof(*desc_ev);

	SPDK_DEBUGLOG(vfu_virtio_scsi, "%s: SCSI Target Num %u, Desc %p, Event %u, Reason %u\n",
		      spdk_vfu_get_endpoint_name(scsi_endpoint->virtio.endpoint), scsi_target_num, desc_ev, event,
		      reason);

	virtio_scsi_req_finish(scsi_req);
	vfu_virtio_vq_flush_irq(dev, vq);
}

static int
virtio_scsi_start(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	struct virtio_scsi_target *scsi_target;
	uint8_t i;
	int ret;

	if (scsi_endpoint->ring_poller) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_scsi, "starting %s\n",
		      spdk_vfu_get_endpoint_name(scsi_endpoint->virtio.endpoint));

	for (i = 0; i < VIRTIO_SCSI_CTRLR_MAX_TARGETS; i++) {
		scsi_target = &scsi_endpoint->targets[i];
		if (scsi_target->dev) {
			ret = spdk_scsi_dev_allocate_io_channels(scsi_target->dev);
			if (ret) {
				SPDK_ERRLOG("%s: Couldn't allocate io channel for SCSI target %u.\n",
					    spdk_vfu_get_endpoint_name(scsi_endpoint->virtio.endpoint), i);
				continue;
			}
		}
	}

	scsi_endpoint->ring_poller = SPDK_POLLER_REGISTER(vfu_virtio_scsi_vring_poll, scsi_endpoint,
				     0);

	return 0;
}

static int
virtio_scsi_stop(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	struct virtio_scsi_target *scsi_target;
	uint8_t i;

	SPDK_DEBUGLOG(vfu_virtio_scsi, "stopping %s\n",
		      spdk_vfu_get_endpoint_name(scsi_endpoint->virtio.endpoint));

	spdk_poller_unregister(&scsi_endpoint->ring_poller);

	for (i = 0; i < VIRTIO_SCSI_CTRLR_MAX_TARGETS; i++) {
		scsi_target = &scsi_endpoint->targets[i];
		if (scsi_target->dev) {
			spdk_scsi_dev_free_io_channels(scsi_target->dev);
		}
	}

	return 0;
}

static void
virtio_scsi_task_cpl(struct spdk_scsi_task *scsi_task)
{
	struct virtio_scsi_req *scsi_req = SPDK_CONTAINEROF(scsi_task, struct virtio_scsi_req, scsi);

	scsi_req->cmd_resp->status = scsi_task->status;
	if (scsi_task->status != SPDK_SCSI_STATUS_GOOD) {
		scsi_req->cmd_resp->sense_len = scsi_task->sense_data_len;
		memcpy(scsi_req->cmd_resp->sense, scsi_task->sense_data, scsi_task->sense_data_len);
	}
	assert(scsi_task->transfer_len == scsi_task->length);
	scsi_req->cmd_resp->resid = scsi_task->length - scsi_task->data_transferred;

	virtio_scsi_req_finish(scsi_req);
	spdk_scsi_task_put(scsi_task);
}

static void
virtio_scsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
{
	struct virtio_scsi_req *scsi_req = SPDK_CONTAINEROF(scsi_task, struct virtio_scsi_req, scsi);

	virtio_scsi_req_finish(scsi_req);
	spdk_scsi_task_put(scsi_task);
}

static void
virtio_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{

}

static struct virtio_scsi_target *
virtio_scsi_cmd_lun_setup(struct virtio_scsi_endpoint *scsi_endpoint,
			  struct virtio_scsi_req *scsi_req, __u8 *lun)
{
	struct virtio_scsi_target *scsi_target;
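	/* The 8-byte virtio-scsi LUN field is: byte 0 == 1, byte 1 == target,
	 * bytes 2-3 == SAM single-level LUN in big-endian (flat addressing sets
	 * the 0x4000 bits); mask the addressing-method bits off to get the LUN id.
	 */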
	uint16_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF;

	SPDK_LOGDUMP(vfu_virtio_scsi_data, "LUN", lun, 8);

	/* First byte must be 1 and second is target */
	if (lun[0] != 1 || lun[1] >= VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		SPDK_DEBUGLOG(vfu_virtio_scsi, "Invalid LUN %u:%u\n", lun[0], lun[1]);
		return NULL;
	}

	scsi_target = &scsi_endpoint->targets[lun[1]];
	if (!scsi_target->dev) {
		SPDK_DEBUGLOG(vfu_virtio_scsi, "SCSI Target num %u doesn't exist\n", lun[1]);
		return NULL;
	}

	scsi_req->scsi.target_port = spdk_scsi_dev_find_port_by_id(scsi_target->dev, 0);
	scsi_req->scsi.lun = spdk_scsi_dev_get_lun(scsi_target->dev, lun_id);
	if (scsi_req->scsi.lun == NULL) {
		SPDK_DEBUGLOG(vfu_virtio_scsi, "LUN %u:%u doesn't exist\n", lun[0], lun[1]);
		return NULL;
	}
	SPDK_DEBUGLOG(vfu_virtio_scsi, "Got valid SCSI Target num %u, bdev %s\n", lun[1],
		      spdk_scsi_lun_get_bdev_name(scsi_req->scsi.lun));

	return scsi_target;
}

static int
virtio_scsi_cmd_data_setup(struct virtio_scsi_req *scsi_req)
{
	struct iovec *iov;
	uint32_t iovcnt;
	uint32_t payload_len;

	iov = &scsi_req->req.iovs[0];
	iovcnt = scsi_req->req.iovcnt;
	payload_len = scsi_req->req.payload_size;

	if (spdk_unlikely(iov->iov_len < sizeof(struct virtio_scsi_cmd_req))) {
		SPDK_ERRLOG("Invalid virtio_scsi command header length");
		return -EINVAL;
	}
	if (spdk_unlikely(iovcnt < 2)) {
		SPDK_ERRLOG("Invalid iovcnt %u\n", iovcnt);
		return -EINVAL;
	}

	scsi_req->cmd_req = scsi_req->req.iovs[0].iov_base;
	payload_len -= scsi_req->req.iovs[0].iov_len;

	/*
	 * FROM_DEV (READ): [RO_req][WR_resp][WR_buf0]...[WR_bufN]
	 * TO_DEV (WRITE): [RO_req][RO_buf0]...[RO_bufN][WR_resp]
	 */
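	/* iov[1] is device-writable only in the FROM_DEV layout above, so its
	 * write permission tells the two layouts apart.
	 */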
	if (virtio_req_iov_is_wr(&scsi_req->req, 1)) {
		scsi_req->scsi.dxfer_dir = SPDK_SCSI_DIR_FROM_DEV;
	} else {
		scsi_req->scsi.dxfer_dir = SPDK_SCSI_DIR_TO_DEV;
	}

	if (scsi_req->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV) {
		if (scsi_req->req.iovs[1].iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
			SPDK_ERRLOG("DIR_FROM_DEV: Invalid virtio_scsi command resp length");
			return -EINVAL;
		}
		scsi_req->cmd_resp = scsi_req->req.iovs[1].iov_base;
		scsi_req->req.used_len = payload_len;
		scsi_req->scsi.iovs = &scsi_req->req.iovs[2];
	} else {
		if (scsi_req->req.iovs[iovcnt - 1].iov_len < sizeof(struct virtio_scsi_cmd_resp)) {
			SPDK_ERRLOG("DIR_TO_DEV: Invalid virtio_scsi command resp length");
			return -EINVAL;
		}
		scsi_req->req.used_len = sizeof(struct virtio_scsi_cmd_resp);
		scsi_req->cmd_resp = scsi_req->req.iovs[iovcnt - 1].iov_base;
		scsi_req->scsi.iovs = &scsi_req->req.iovs[1];
	}

	/* -2 for REQ and RESP */
	iovcnt -= 2;
	if (!iovcnt) {
		scsi_req->scsi.length = 0;
		scsi_req->scsi.transfer_len = 0;
		scsi_req->scsi.iovs[0].iov_len = 0;
	} else {
		assert(payload_len > sizeof(struct virtio_scsi_cmd_resp));
		payload_len -= sizeof(struct virtio_scsi_cmd_resp);
		scsi_req->scsi.length = payload_len;
		scsi_req->scsi.transfer_len = payload_len;
	}
	scsi_req->scsi.iovcnt = iovcnt;
	scsi_req->scsi.cdb = scsi_req->cmd_req->cdb;
	scsi_req->cmd_resp->response = VIRTIO_SCSI_S_OK;

	SPDK_LOGDUMP(vfu_virtio_scsi_data, "CDB=", scsi_req->cmd_req->cdb, VIRTIO_SCSI_CDB_SIZE);
	SPDK_DEBUGLOG(vfu_virtio_scsi, "%s, iovcnt %u, transfer_len %u, used len %u\n",
		      scsi_req->scsi.dxfer_dir == SPDK_SCSI_DIR_FROM_DEV ? "XFER_FROM_DEV" : "XFER_TO_DEV",
		      scsi_req->scsi.iovcnt, payload_len, scsi_req->req.used_len);

	return 0;
}

static int
virtio_scsi_tmf_cmd_req(struct virtio_scsi_endpoint *scsi_endpoint,
			struct virtio_scsi_req *scsi_req)
{
	uint32_t iovcnt;
	struct iovec *iov;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_target *scsi_target;

	iov = &scsi_req->req.iovs[0];
	iovcnt = scsi_req->req.iovcnt;
	tmf_req = iov->iov_base;
	if (spdk_unlikely(iovcnt < 2)) {
		SPDK_ERRLOG("Invalid iovcnt %u\n", iovcnt);
		goto invalid;
	}

	memset(&scsi_req->scsi, 0, sizeof(struct spdk_scsi_task));
	spdk_scsi_task_construct(&scsi_req->scsi, virtio_scsi_task_mgmt_cpl, virtio_scsi_task_free_cb);

	switch (tmf_req->type) {
	case VIRTIO_SCSI_T_TMF:
		if (scsi_req->req.iovs[0].iov_len < sizeof(struct virtio_scsi_ctrl_tmf_req) ||
		    scsi_req->req.iovs[1].iov_len < sizeof(struct virtio_scsi_ctrl_tmf_resp)) {
			SPDK_ERRLOG("Invalid size of tmf_req or tmf_resp\n");
			goto invalid;
		}
		scsi_req->tmf_req = tmf_req;
		scsi_req->tmf_resp = scsi_req->req.iovs[1].iov_base;
		switch (tmf_req->subtype) {
		case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
			scsi_target = virtio_scsi_cmd_lun_setup(scsi_endpoint, scsi_req, scsi_req->tmf_req->lun);
			if (!scsi_target) {
				scsi_req->tmf_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
				break;
			}
			/* Management task submission */
			scsi_req->tmf_resp->response = VIRTIO_SCSI_S_OK;
			scsi_req->scsi.function = SPDK_SCSI_TASK_FUNC_LUN_RESET;
			spdk_scsi_dev_queue_mgmt_task(scsi_target->dev, &scsi_req->scsi);
			return 0;
			break;
		default:
			scsi_req->tmf_resp->response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
			break;
		}
		break;

	case VIRTIO_SCSI_T_AN_QUERY:
	case VIRTIO_SCSI_T_AN_SUBSCRIBE:
		if (scsi_req->req.iovs[0].iov_len < sizeof(struct virtio_scsi_ctrl_an_req) ||
		    scsi_req->req.iovs[1].iov_len < sizeof(struct virtio_scsi_ctrl_an_resp)) {
			SPDK_ERRLOG("Invalid size of an_req or an_resp\n");
			goto invalid;
		}
		scsi_req->req.used_len = sizeof(struct virtio_scsi_ctrl_an_resp);
		/* Leave the response byte of virtio_scsi_ctrl_an_resp untouched */
		goto invalid;
		break;
	default:
		break;
	}

invalid:
	/* invalid request */
	virtio_scsi_req_finish(scsi_req);
	return -1;
}

static int
virtio_scsi_cmd_req(struct virtio_scsi_endpoint *scsi_endpoint, struct virtio_scsi_req *scsi_req)
{
	int ret;
	struct virtio_scsi_target *scsi_target;

	memset(&scsi_req->scsi, 0, sizeof(struct spdk_scsi_task));
	spdk_scsi_task_construct(&scsi_req->scsi, virtio_scsi_task_cpl, virtio_scsi_task_free_cb);

	ret = virtio_scsi_cmd_data_setup(scsi_req);
	if (ret) {
		SPDK_ERRLOG("Failed to set up SCSI command, ret %d\n", ret);
		goto invalid;
	}

	scsi_target = virtio_scsi_cmd_lun_setup(scsi_endpoint, scsi_req, scsi_req->cmd_req->lun);
	if (!scsi_target) {
		scsi_req->cmd_resp->response = VIRTIO_SCSI_S_BAD_TARGET;
		goto invalid;
	}

	spdk_scsi_dev_queue_task(scsi_target->dev, &scsi_req->scsi);
	return 0;

invalid:
	/* invalid request */
	virtio_scsi_req_finish(scsi_req);
	return ret;
}

static int
virtio_scsi_process_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
			struct vfu_virtio_req *req)
{
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	struct virtio_scsi_req *scsi_req = to_scsi_request(req);

	scsi_req->endpoint = scsi_endpoint;

	/* SCSI task management command */
	if (spdk_unlikely(vq->id == 0)) {
		return virtio_scsi_tmf_cmd_req(scsi_endpoint, scsi_req);
	}

	/* SCSI command */
	return virtio_scsi_cmd_req(scsi_endpoint, scsi_req);
}

static void
virtio_scsi_update_config(struct virtio_scsi_endpoint *scsi_endpoint)
{
	struct virtio_scsi_config *scsi_cfg;

	if (!scsi_endpoint) {
		return;
	}

	scsi_cfg = &scsi_endpoint->scsi_cfg;

	scsi_cfg->num_queues = scsi_endpoint->virtio.num_queues;
	/* -2 for REQ and RESP and -1 for region boundary splitting */
	scsi_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 2 - 1);
	/* we can set `max_sectors` and `cmd_per_lun` based on bdevs */
	scsi_cfg->max_sectors = 131072;
	scsi_cfg->cmd_per_lun = scsi_endpoint->virtio.qsize;
	scsi_cfg->event_info_size = sizeof(struct virtio_scsi_event);
	scsi_cfg->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
	scsi_cfg->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
	scsi_cfg->max_channel = 0;
	scsi_cfg->max_target = VIRTIO_SCSI_CTRLR_MAX_TARGETS;
	scsi_cfg->max_lun = 16383;
}

static uint64_t
virtio_scsi_get_supported_features(struct vfu_virtio_endpoint *virtio_endpoint)
{
	uint64_t features;

	features = VIRTIO_SCSI_SUPPORTED_FEATURES | VIRTIO_HOST_SUPPORTED_FEATURES;

	if (!virtio_endpoint->packed_ring) {
		features &= ~(1ULL << VIRTIO_F_RING_PACKED);
	}

	return features;
}

static int
virtio_scsi_get_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				       uint64_t offset, uint64_t count)
{
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	uint8_t *scsi_cfg;

	if ((offset + count) > sizeof(struct virtio_scsi_config)) {
		SPDK_ERRLOG("Invalid device specific configuration offset 0x%"PRIx64"\n", offset);
		return -EINVAL;
	}

	scsi_cfg = (uint8_t *)&scsi_endpoint->scsi_cfg;
	memcpy(buf, scsi_cfg + offset, count);

	return 0;
}

static int
virtio_scsi_set_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				       uint64_t offset, uint64_t count)
{
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	uint32_t value;
	int ret = 0;

	if ((offset + count) > sizeof(struct virtio_scsi_config)) {
		SPDK_ERRLOG("Invalid device specific configuration offset 0x%"PRIx64"\n", offset);
		return -EINVAL;
	}

	switch (offset) {
	case offsetof(struct virtio_scsi_config, sense_size):
		value = *(uint32_t *)buf;
		if (scsi_endpoint->scsi_cfg.sense_size != value) {
			SPDK_ERRLOG("Changing sense data size to %u is not supported\n", value);
			ret = -ENOTSUP;
		}
		break;
	case offsetof(struct virtio_scsi_config, cdb_size):
		value = *(uint32_t *)buf;
		if (scsi_endpoint->scsi_cfg.cdb_size != value) {
			SPDK_ERRLOG("Changing CDB size to %u is not supported\n", value);
			ret = -ENOTSUP;
		}
		break;
	default:
		SPDK_ERRLOG("Invalid offset %"PRIu64"\n", offset);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct vfu_virtio_req *
virtio_scsi_alloc_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct virtio_scsi_req *scsi_req;

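	/* reserve room behind the structure for the libvfio-user DMA
	 * scatter-gather list of up to VIRTIO_DEV_MAX_IOVS + 1 entries
	 */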
	scsi_req = calloc(1, sizeof(*scsi_req) + dma_sg_size() * (VIRTIO_DEV_MAX_IOVS + 1));
	if (!scsi_req) {
		return NULL;
	}

	return &scsi_req->req;
}

static void
virtio_scsi_free_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		     struct vfu_virtio_req *req)
{
	struct virtio_scsi_req *scsi_req = to_scsi_request(req);

	free(scsi_req);
}

struct vfu_virtio_ops virtio_scsi_ops = {
	.get_device_features = virtio_scsi_get_supported_features,
	.alloc_req = virtio_scsi_alloc_req,
	.free_req = virtio_scsi_free_req,
	.exec_request = virtio_scsi_process_req,
	.get_config = virtio_scsi_get_device_specific_config,
	.set_config = virtio_scsi_set_device_specific_config,
	.start_device = virtio_scsi_start,
	.stop_device = virtio_scsi_stop,
};

int
vfu_virtio_scsi_set_options(const char *name, uint16_t num_io_queues, uint16_t qsize,
			    bool packed_ring)
{
	struct spdk_vfu_endpoint *endpoint;
	uint32_t num_queues;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_scsi_endpoint *scsi_endpoint;

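	/* in addition to the request queues, account for the control queue and the event queue */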
	num_queues = num_io_queues + 2;

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	if (virtio_endpoint->dev) {
		SPDK_ERRLOG("Options cannot be changed at runtime\n");
		return -EFAULT;
	}

	if ((num_queues > 2) && (num_queues <= VIRTIO_DEV_MAX_VQS)) {
		scsi_endpoint->virtio.num_queues = num_queues;
	} else {
		SPDK_NOTICELOG("Invalid number of IO queues, using the maximum of %u\n", VIRTIO_DEV_MAX_VQS - 2);
		scsi_endpoint->virtio.num_queues = VIRTIO_DEV_MAX_VQS;
	}

	if (qsize && qsize <= VIRTIO_VQ_MAX_SIZE) {
		scsi_endpoint->virtio.qsize = qsize;
	} else {
		SPDK_NOTICELOG("Use queue size %u\n", VIRTIO_VQ_DEFAULT_SIZE);
		scsi_endpoint->virtio.qsize = VIRTIO_VQ_DEFAULT_SIZE;
	}
	scsi_endpoint->virtio.packed_ring = packed_ring;

	SPDK_DEBUGLOG(vfu_virtio_scsi, "%s: num_queues %u, qsize %u, packed ring %s\n",
		      spdk_vfu_get_endpoint_id(endpoint),
		      scsi_endpoint->virtio.num_queues, scsi_endpoint->virtio.qsize,
		      packed_ring ? "enabled" : "disabled");

	virtio_scsi_update_config(scsi_endpoint);

	return 0;
}

struct virtio_scsi_event_ctx {
	struct virtio_scsi_endpoint *scsi_endpoint;
	struct virtio_scsi_target *scsi_target;
	uint8_t scsi_target_num;
};

static uint8_t
get_scsi_target_num_by_lun(struct virtio_scsi_endpoint *scsi_endpoint,
			   const struct spdk_scsi_lun *lun)
{
	const struct spdk_scsi_dev *scsi_dev;
	struct virtio_scsi_target *scsi_target;
	uint8_t i;

	scsi_dev = spdk_scsi_lun_get_dev(lun);
	for (i = 0; i < VIRTIO_SCSI_CTRLR_MAX_TARGETS; i++) {
		scsi_target = &scsi_endpoint->targets[i];
		if (scsi_target->dev == scsi_dev) {
			return i;
		}
	}

	return VIRTIO_SCSI_CTRLR_MAX_TARGETS;
}

static void
vfu_virtio_scsi_lun_resize_msg(void *ctx)
{
	struct virtio_scsi_event_ctx *resize_ctx = ctx;
	struct virtio_scsi_endpoint *scsi_endpoint = resize_ctx->scsi_endpoint;
	uint8_t scsi_target_num = resize_ctx->scsi_target_num;

	free(resize_ctx);

	if (virtio_guest_has_feature(scsi_endpoint->virtio.dev, VIRTIO_SCSI_F_CHANGE)) {
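		/* the reason field carries SCSI sense ASC/ASCQ 0x2a/0x09,
		 * i.e. CAPACITY DATA HAS CHANGED
		 */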
		vfu_virtio_scsi_eventq_enqueue(scsi_endpoint, scsi_target_num,
					       VIRTIO_SCSI_T_PARAM_CHANGE, 0x2a | (0x09 << 8));
	}
}

static void
vfu_virtio_scsi_lun_resize(const struct spdk_scsi_lun *lun, void *arg)
{
	struct virtio_scsi_endpoint *scsi_endpoint = arg;
	uint8_t scsi_target_num;
	struct virtio_scsi_event_ctx *ctx;

	scsi_target_num = get_scsi_target_num_by_lun(scsi_endpoint, lun);
	if (scsi_target_num == VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate hotplug ctx\n");
		return;
	}
	ctx->scsi_endpoint = scsi_endpoint;
	ctx->scsi_target_num = scsi_target_num;

	spdk_thread_send_msg(scsi_endpoint->virtio.thread, vfu_virtio_scsi_lun_resize_msg, ctx);
}

static void
vfu_virtio_scsi_lun_hotremove_msg(void *ctx)
{
	struct virtio_scsi_event_ctx *hotplug = ctx;
	struct virtio_scsi_endpoint *scsi_endpoint = hotplug->scsi_endpoint;
	struct virtio_scsi_target *scsi_target = hotplug->scsi_target;
	struct spdk_scsi_dev *scsi_dev = scsi_target->dev;
	uint8_t scsi_target_num = hotplug->scsi_target_num;

	free(hotplug);

	if (!scsi_dev) {
		return;
	}
	scsi_target->dev = NULL;
	spdk_scsi_dev_free_io_channels(scsi_dev);
	spdk_scsi_dev_destruct(scsi_dev, NULL, NULL);

	assert(scsi_endpoint->virtio.dev);
	if (!virtio_dev_is_started(scsi_endpoint->virtio.dev)) {
		return;
	}

	if (virtio_guest_has_feature(scsi_endpoint->virtio.dev, VIRTIO_SCSI_F_HOTPLUG)) {
		SPDK_DEBUGLOG(vfu_virtio_scsi, "Target num %u, sending event\n", scsi_target_num);
		vfu_virtio_scsi_eventq_enqueue(scsi_endpoint, scsi_target_num,
					       VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_REMOVED);
	}
}

static void
vfu_virtio_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
{
	struct virtio_scsi_endpoint *scsi_endpoint = arg;
	struct virtio_scsi_target *scsi_target;
	struct virtio_scsi_event_ctx *ctx;
	uint8_t scsi_target_num;

	if (!scsi_endpoint->virtio.dev) {
		return;
	}

	scsi_target_num = get_scsi_target_num_by_lun(scsi_endpoint, lun);
	if (scsi_target_num == VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		return;
	}
	scsi_target = &scsi_endpoint->targets[scsi_target_num];
	if (!scsi_target->dev) {
		return;
	}

	SPDK_DEBUGLOG(vfu_virtio_scsi, "Removing bdev %s, Target num %u\n",
		      spdk_scsi_lun_get_bdev_name(lun), scsi_target_num);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		SPDK_ERRLOG("Failed to allocate hotplug ctx\n");
		return;
	}
	ctx->scsi_endpoint = scsi_endpoint;
	ctx->scsi_target = scsi_target;
	ctx->scsi_target_num = scsi_target_num;

	spdk_thread_send_msg(scsi_endpoint->virtio.thread, vfu_virtio_scsi_lun_hotremove_msg, ctx);
}

static void
vfu_virtio_scsi_lun_hotplug_msg(void *ctx)
{
	struct virtio_scsi_event_ctx *hotplug = ctx;
	struct virtio_scsi_endpoint *scsi_endpoint = hotplug->scsi_endpoint;
	struct virtio_scsi_target *scsi_target = hotplug->scsi_target;
	uint8_t scsi_target_num = hotplug->scsi_target_num;
	int ret;

	free(hotplug);

	assert(scsi_endpoint->virtio.dev);
	if (!virtio_dev_is_started(scsi_endpoint->virtio.dev)) {
		return;
	}

	ret = spdk_scsi_dev_allocate_io_channels(scsi_target->dev);
	if (ret) {
		SPDK_ERRLOG("%s: Couldn't allocate io channel for SCSI target %u.\n",
			    spdk_vfu_get_endpoint_name(scsi_endpoint->virtio.endpoint), scsi_target_num);
		return;
	}

	if (virtio_guest_has_feature(scsi_endpoint->virtio.dev, VIRTIO_SCSI_F_HOTPLUG)) {
		vfu_virtio_scsi_eventq_enqueue(scsi_endpoint, scsi_target_num,
					       VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN);
	}
}

int
vfu_virtio_scsi_add_target(const char *name, uint8_t scsi_target_num, const char *bdev_name)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_scsi_endpoint *scsi_endpoint;
	struct virtio_scsi_target *scsi_target;
	char target_name[SPDK_SCSI_DEV_MAX_NAME];
	int lun_id_list[1];
	const char *bdev_names_list[1];

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}
	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	scsi_endpoint = to_scsi_endpoint(virtio_endpoint);

	if (scsi_target_num >= VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		SPDK_ERRLOG("Invalid SCSI target number, maximum SCSI target number is %u\n",
			    VIRTIO_SCSI_CTRLR_MAX_TARGETS - 1);
		return -EINVAL;
	}
	scsi_target = &scsi_endpoint->targets[scsi_target_num];
	if (scsi_target->dev) {
		SPDK_ERRLOG("SCSI Target %u is already occupied\n", scsi_target_num);
		return -EEXIST;
	}

	snprintf(target_name, sizeof(target_name), "Target %u", scsi_target_num);
	lun_id_list[0] = 0;
	bdev_names_list[0] = (char *)bdev_name;

	scsi_target->dev = spdk_scsi_dev_construct_ext(target_name, bdev_names_list, lun_id_list, 1,
			   SPDK_SPC_PROTOCOL_IDENTIFIER_SAS,
			   vfu_virtio_scsi_lun_resize, scsi_endpoint,
			   vfu_virtio_scsi_lun_hotremove, scsi_endpoint);
	if (!scsi_target->dev) {
		SPDK_ERRLOG("%s: couldn't create SCSI target %u via bdev %s\n", name, scsi_target_num, bdev_name);
		return -EFAULT;
	}
	spdk_scsi_dev_add_port(scsi_target->dev, 0, "vfu-virtio-scsi");

	SPDK_NOTICELOG("%s: added SCSI target %u using bdev '%s'\n", name, scsi_target_num, bdev_name);
	virtio_scsi_update_config(scsi_endpoint);

	if (virtio_endpoint->dev) {
		struct virtio_scsi_event_ctx *ctx;

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx) {
			SPDK_ERRLOG("Failed to allocate hotplug ctx\n");
			/* This isn't fatal, just skip hotplug notification */
		} else {
			ctx->scsi_endpoint = scsi_endpoint;
			ctx->scsi_target = scsi_target;
			ctx->scsi_target_num = scsi_target_num;
			spdk_thread_send_msg(virtio_endpoint->thread, vfu_virtio_scsi_lun_hotplug_msg, ctx);
		}
	}

	return 0;
}

int
vfu_virtio_scsi_remove_target(const char *name, uint8_t scsi_target_num)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_scsi_endpoint *scsi_endpoint;
	struct virtio_scsi_target *scsi_target;

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}
	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	scsi_endpoint = to_scsi_endpoint(virtio_endpoint);

	if (scsi_target_num >= VIRTIO_SCSI_CTRLR_MAX_TARGETS) {
		SPDK_ERRLOG("Invalid SCSI target number, maximum SCSI target number is %u\n",
			    VIRTIO_SCSI_CTRLR_MAX_TARGETS - 1);
		return -EINVAL;
	}
	scsi_target = &scsi_endpoint->targets[scsi_target_num];
	if (!scsi_target->dev) {
		SPDK_ERRLOG("SCSI Target %u doesn't exist\n", scsi_target_num);
		return -ENOENT;
	}

	SPDK_NOTICELOG("%s: Remove SCSI target num %u\n", name, scsi_target_num);

	if (virtio_endpoint->dev) {
		struct virtio_scsi_event_ctx *ctx;

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx) {
			SPDK_ERRLOG("Failed to allocate hotplug ctx\n");
			/* This isn't fatal, just skip hotplug notification */
		} else {
			ctx->scsi_endpoint = scsi_endpoint;
			ctx->scsi_target = scsi_target;
			ctx->scsi_target_num = scsi_target_num;
			spdk_thread_send_msg(scsi_endpoint->virtio.thread, vfu_virtio_scsi_lun_hotremove_msg, ctx);
		}
	} else {
		spdk_scsi_dev_destruct(scsi_target->dev, NULL, NULL);
		scsi_target->dev = NULL;
	}

	return 0;
}

static int
vfu_virtio_scsi_endpoint_destruct(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);
	struct virtio_scsi_target *scsi_target;
	uint8_t i;

	for (i = 0; i < VIRTIO_SCSI_CTRLR_MAX_TARGETS; i++) {
		scsi_target = &scsi_endpoint->targets[i];
		if (scsi_target->dev) {
			spdk_scsi_dev_destruct(scsi_target->dev, NULL, NULL);
		}
	}

	vfu_virtio_endpoint_destruct(&scsi_endpoint->virtio);
	free(scsi_endpoint);

	return 0;
}

static void *
vfu_virtio_scsi_endpoint_init(struct spdk_vfu_endpoint *endpoint,
			      char *basename, const char *endpoint_name)
{
	struct virtio_scsi_endpoint *scsi_endpoint;
	int ret;

	scsi_endpoint = calloc(1, sizeof(*scsi_endpoint));
	if (!scsi_endpoint) {
		return NULL;
	}

	ret = vfu_virtio_endpoint_setup(&scsi_endpoint->virtio, endpoint, basename, endpoint_name,
					&virtio_scsi_ops);
	if (ret) {
		SPDK_ERRLOG("Failed to set up endpoint %s\n", endpoint_name);
		free(scsi_endpoint);
		return NULL;
	}

	virtio_scsi_update_config(scsi_endpoint);
	return (void *)&scsi_endpoint->virtio;
}

static int
vfu_virtio_scsi_get_device_info(struct spdk_vfu_endpoint *endpoint,
				struct spdk_vfu_pci_device *device_info)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_scsi_endpoint *scsi_endpoint = to_scsi_endpoint(virtio_endpoint);

	vfu_virtio_get_device_info(&scsi_endpoint->virtio, device_info);
	/* Fill Device ID */
	device_info->id.did = PCI_DEVICE_ID_VIRTIO_SCSI_MODERN;

	return 0;
}

struct spdk_vfu_endpoint_ops vfu_virtio_scsi_ops = {
	.name = "virtio_scsi",
	.init = vfu_virtio_scsi_endpoint_init,
	.get_device_info = vfu_virtio_scsi_get_device_info,
	.get_vendor_capability = vfu_virtio_get_vendor_capability,
	.post_memory_add = vfu_virtio_post_memory_add,
	.pre_memory_remove = vfu_virtio_pre_memory_remove,
	.reset_device = vfu_virtio_pci_reset_cb,
	.quiesce_device = vfu_virtio_quiesce_cb,
	.destruct = vfu_virtio_scsi_endpoint_destruct,
	.attach_device = vfu_virtio_attach_device,
	.detach_device = vfu_virtio_detach_device,
};

static void
__attribute__((constructor)) _vfu_virtio_scsi_pci_model_register(void)
{
	spdk_vfu_register_endpoint_ops(&vfu_virtio_scsi_ops);
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_scsi)
SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_scsi_data)