/*	$OpenBSD: vioscsi.c,v 1.35 2024/12/20 22:18:27 sf Exp $	*/
/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pv/vioscsireg.h>
#include <dev/pv/virtiovar.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we also need room for the request and response headers */
#define ALLOC_SEGS	(SEG_MAX + 2)

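/*
 * Each request permanently owns one descriptor chain in the request
 * virtqueue (vr_qe_index) and two DMA maps: vr_control covers the
 * device-visible prefix of this structure (vr_req and vr_res), while
 * vr_data is loaded per command with the caller's transfer buffer.
 */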
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
	SLIST_ENTRY(vioscsi_req)	 vr_list;
	int				 vr_qe_index;
};

struct vioscsi_softc {
	struct device		 sc_dev;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;

	struct virtqueue	 sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];
	SLIST_HEAD(, vioscsi_req) sc_freelist;
};

int		 vioscsi_match(struct device *, void *, void *);
void		 vioscsi_attach(struct device *, struct device *, void *);

int		 vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
void		 vioscsi_scsi_cmd(struct scsi_xfer *);
int		 vioscsi_vq_done(struct virtqueue *);
void		 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
void		*vioscsi_req_get(void *);
void		 vioscsi_req_put(void *, void *);

const struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

struct cfdriver vioscsi_cd = {
	NULL, "vioscsi", DV_DULL,
};

const struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd, NULL, NULL, NULL, NULL
};

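/*
 * Virtqueue indices follow the virtio-scsi spec: 0 is the control queue,
 * 1 the event queue and 2 the request queue.  All three are allocated,
 * but this driver only ever submits commands on the request queue.
 */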
const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

int
vioscsi_match(struct device *parent, void *self, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->va_devid == PCI_PRODUCT_VIRTIO_SCSI)
		return (1);
	return (0);
}

void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct virtio_attach_args *va = aux;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	if (virtio_negotiate_features(vsc, NULL) != 0)
		goto err;
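	/*
	 * Read the device limits from the config space: cmd_per_lun is the
	 * number of commands the device accepts per LUN, seg_max the longest
	 * scatter/gather list it takes, max_target the highest target id.
	 */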
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %d too small\n", seg_max);
		goto err;
	}

	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, ALLOC_SEGS,
		    vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	int nreqs = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

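	/*
	 * Advertise no more openings to the midlayer than both the ring
	 * (nreqs) and the device (cmd_per_lun) can actually take.
	 */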
	saa.saa_adapter = &vioscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = max_target;
	saa.saa_luns = 8;
	saa.saa_openings = (nreqs > cmd_per_lun) ? cmd_per_lun : nreqs;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	if (virtio_attach_finish(vsc, va) != 0)
		goto err;
	config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr->vr_qe_index;

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);
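	/* For example, target 3 LUN 5 encodes as 01 03 40 05 00 00 00 00. */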

	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, &xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

	int nsegs = 2;
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

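	/*
	 * Build the descriptor chain in the order the device expects:
	 * device-readable segments first (the request header, then data-out
	 * if any), device-writable segments last (the response header, then
	 * data-in if any).  The final argument below marks a segment the
	 * device reads (1) or writes (0).
	 */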
	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

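	/*
	 * For polled commands, process completions by hand for up to
	 * roughly one second (1000 iterations of 1ms).
	 */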
	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
		do {
			virtio_poll_intr(vsc);
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}

void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

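	/*
	 * Copy back at most sizeof(xs->sense) bytes of sense data; a zero
	 * sense length means the command succeeded without sense data.
	 */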
	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	vr->vr_xs = NULL;
	scsi_done(xs);
}

int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
	struct vq_entry *qe;
	struct vioscsi_req *vr;
	int ret = 0;

	DPRINTF("vioscsi_vq_done: enter\n");

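	/*
	 * Drain every completed request from the ring; splbio() keeps the
	 * dequeue from racing the enqueue path in vioscsi_scsi_cmd().
	 */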
	for (;;) {
		int r, s, slot;
		s = splbio();
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		splx(s);
		if (r != 0)
			break;

		DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
		qe = &vq->vq_entries[slot];
		vr = &sc->sc_reqs[qe->qe_vr_index];
		vioscsi_req_done(sc, vsc, vr);
		ret = 1;
	}

	DPRINTF("vioscsi_vq_done: exit %d\n", ret);

	return (ret);
}

/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioscsi_req_get(void *cookie)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DPRINTF("vioscsi_req_get: %p\n", vr);

	return (vr);
}

void
vioscsi_req_put(void *cookie, void *io)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = io;

	DPRINTF("vioscsi_req_put: %p\n", vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the vioscsi_req and
	 * should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

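	/*
	 * With indirect descriptors every request takes only one slot in
	 * the ring; without them, each request consumes ALLOC_SEGS slots.
	 */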
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zd, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;

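		/*
		 * The control map covers only the device-visible prefix of
		 * the request (vr_req followed by vr_res) in one segment;
		 * offsetof(vr_xs) is exactly the size of that prefix.
		 */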
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, SEG_MAX,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r);
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}
513