/*	$OpenBSD: vioscsi.c,v 1.32 2023/05/29 08:13:35 sf Exp $	*/
/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pv/vioscsireg.h>
#include <dev/pv/virtiovar.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)
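/*
 * For example, with MAXPHYS of 64KB and a 4KB PAGE_SIZE (typical
 * values on OpenBSD/amd64), SEG_MAX works out to 17 and ALLOC_SEGS
 * to 19: up to 17 data segments plus one descriptor each for the
 * request header and the response footer.
 */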

struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
	SLIST_ENTRY(vioscsi_req)	 vr_list;
	int				 vr_qe_index;
};

struct vioscsi_softc {
	struct device		 sc_dev;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;

	struct virtqueue	 sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];
	SLIST_HEAD(, vioscsi_req) sc_freelist;
};

int		 vioscsi_match(struct device *, void *, void *);
void		 vioscsi_attach(struct device *, struct device *, void *);

int		 vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
void		 vioscsi_scsi_cmd(struct scsi_xfer *);
int		 vioscsi_vq_done(struct virtqueue *);
void		 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
void		*vioscsi_req_get(void *);
void		 vioscsi_req_put(void *, void *);

const struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

struct cfdriver vioscsi_cd = {
	NULL, "vioscsi", DV_DULL,
};

const struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd, NULL, NULL, NULL, NULL
};

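/*
 * Virtqueue names, in the order the virtio-scsi spec assigns them:
 * queue 0 is the control queue, queue 1 the event queue, and queue 2
 * the first (here: only) request queue.
 */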
const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

int
vioscsi_match(struct device *parent, void *self, void *aux)
{
	struct virtio_softc *va = (struct virtio_softc *)aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return (1);
	return (0);
}

void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	virtio_negotiate_features(vsc, NULL);
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %d too small\n", seg_max);
		goto err;
	}

	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    ALLOC_SEGS, vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	int nreqs = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	saa.saa_adapter = &vioscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = max_target;
	saa.saa_luns = 8;
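	/*
	 * Advertise no more openings per LUN than the device's
	 * cmd_per_lun limit or the number of requests we actually
	 * managed to allocate, whichever is smaller.
	 */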
	saa.saa_openings = (nreqs > cmd_per_lun) ? cmd_per_lun : nreqs;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr->vr_qe_index;

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);
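	/*
	 * For example, target 2, LUN 5 encodes as
	 * 01 02 40 05 00 00 00 00; the 0x40 in byte 2 marks the
	 * single level (flat space) LUN addressing format.
	 */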

	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, &xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

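	/*
	 * Two descriptors are always needed: one for the request
	 * header and one for the response footer.  A data buffer adds
	 * one more descriptor per DMA segment on top of that.
	 */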
	int nsegs = 2;
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

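	/*
	 * Build the descriptor chain in the order virtio requires:
	 * device-readable buffers (request header, then data-out)
	 * before device-writable ones (response footer, then data-in).
	 */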
	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
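		/*
		 * Up to 1000 iterations of a 1000us delay gives the
		 * device roughly one second to complete the command.
		 */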
		do {
			virtio_poll_intr(vsc);
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}

void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

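	/*
	 * VIRTIO_SCSI_S_OK only means the transport succeeded; the
	 * SCSI status byte and any sense data still describe the
	 * command's outcome, so returned sense is surfaced as XS_SENSE.
	 */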
	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	vr->vr_xs = NULL;
	scsi_done(xs);
}

int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
	struct vq_entry *qe;
	struct vioscsi_req *vr;
	int ret = 0;

	DPRINTF("vioscsi_vq_done: enter\n");

	for (;;) {
		int r, s, slot;
		s = splbio();
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		splx(s);
		if (r != 0)
			break;

		DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
		qe = &vq->vq_entries[slot];
		vr = &sc->sc_reqs[qe->qe_vr_index];
		vioscsi_req_done(sc, vsc, vr);
		ret = 1;
	}

	DPRINTF("vioscsi_vq_done: exit %d\n", ret);

	return (ret);
}

/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioscsi_req_get(void *cookie)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DPRINTF("vioscsi_req_get: %p\n", vr);

	return (vr);
}

void
vioscsi_req_put(void *cookie, void *io)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = io;

	DPRINTF("vioscsi_req_put: %p\n", vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the vioscsi_req and
	 * should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;
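	/*
	 * With indirect descriptors each request occupies a single
	 * ring slot, so one request per queue entry fits.  Without
	 * them, a request pins ALLOC_SEGS slots for its lifetime;
	 * e.g. a 256-entry ring with ALLOC_SEGS of 19 yields 13
	 * requests.
	 */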

	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zd, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;

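		/*
		 * The control map covers only the DMA-visible prefix
		 * of the request (vr_req and vr_res); everything from
		 * vr_xs onward is driver-private, so that member's
		 * offset doubles as the map size.
		 */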
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, SEG_MAX,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r);
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}