/*	$OpenBSD: vioscsi.c,v 1.30 2022/04/16 19:19:59 naddy Exp $	*/
/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pv/vioscsireg.h>
#include <dev/pv/virtiovar.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
	SLIST_ENTRY(vioscsi_req)	 vr_list;
	int				 vr_qe_index;
};

struct vioscsi_softc {
	struct device		 sc_dev;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;

	struct virtqueue	 sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];
	SLIST_HEAD(, vioscsi_req) sc_freelist;
};

int		 vioscsi_match(struct device *, void *, void *);
void		 vioscsi_attach(struct device *, struct device *, void *);

int		 vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
void		 vioscsi_scsi_cmd(struct scsi_xfer *);
int		 vioscsi_vq_done(struct virtqueue *);
void		 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
void		*vioscsi_req_get(void *);
void		 vioscsi_req_put(void *, void *);

const struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

struct cfdriver vioscsi_cd = {
	NULL, "vioscsi", DV_DULL,
};

const struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd, NULL, NULL, NULL, NULL
};

const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

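/*
 * Match on the virtio child device ID for SCSI host adapters.
 */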
int
vioscsi_match(struct device *parent, void *self, void *aux)
{
	struct virtio_softc *va = (struct virtio_softc *)aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return (1);
	return (0);
}

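/*
 * Negotiate features, read the device configuration (command depth,
 * segment limit, target count), allocate the three virtqueues and the
 * request pool, and attach the SCSI bus.
 */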
void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	virtio_negotiate_features(vsc, NULL);
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %d too small\n", seg_max);
		goto err;
	}

	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    ALLOC_SEGS, vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	int nreqs = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	saa.saa_adapter = &vioscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = max_target;
	saa.saa_luns = 8;
	saa.saa_openings = (nreqs > cmd_per_lun) ? cmd_per_lun : nreqs;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

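/*
 * Translate a scsi_xfer into a virtio-scsi request: encode the LUN,
 * copy in the CDB, map the data buffer, and enqueue the descriptor
 * chain on the request virtqueue.
 */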
void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr->vr_qe_index;

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);

	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, &xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

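	/*
	 * Two descriptors for the request and response headers, plus
	 * one per segment of the data buffer, if any.
	 */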
	int nsegs = 2;
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

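	/*
	 * Enqueue in the order the device expects: the device-readable
	 * request header, then any data-out buffer, then the
	 * device-writable response header, then any data-in buffer.
	 */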
	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

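	/*
	 * For polled commands, spin on virtio_poll_intr() for up to a
	 * second before declaring a timeout.
	 */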
	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
		do {
			virtio_poll_intr(vsc);
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}

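/*
 * Complete a finished request: sync and unload the DMA maps, translate
 * the virtio-scsi response into scsi_xfer status, and hand the transfer
 * back to the SCSI layer.
 */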
void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	vr->vr_xs = NULL;
	scsi_done(xs);
}

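/*
 * Virtqueue completion handler: drain all finished slots from the ring
 * and complete the corresponding requests.
 */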
int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
	struct vq_entry *qe;
	struct vioscsi_req *vr;
	int ret = 0;

	DPRINTF("vioscsi_vq_done: enter\n");

	for (;;) {
		int r, s, slot;
		s = splbio();
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		splx(s);
		if (r != 0)
			break;

		DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
		qe = &vq->vq_entries[slot];
		vr = &sc->sc_reqs[qe->qe_vr_index];
		vioscsi_req_done(sc, vsc, vr);
		ret = 1;
	}

	DPRINTF("vioscsi_vq_done: exit %d\n", ret);

	return (ret);
}

/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioscsi_req_get(void *cookie)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DPRINTF("vioscsi_req_get: %p\n", vr);

	return (vr);
}

void
vioscsi_req_put(void *cookie, void *io)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = io;

	DPRINTF("vioscsi_req_put: %p\n", vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the vioscsi_req and
	 * should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

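/*
 * Allocate DMA-able memory for the request pool and permanently bind
 * each request to a virtqueue slot. With indirect descriptors every
 * request needs only one ring slot; without them, each request must
 * reserve ALLOC_SEGS contiguous slots.
 */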
int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zd, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;
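
		/*
		 * The control map covers only the fixed request and
		 * response headers at the start of the structure, which
		 * is why offsetof(struct vioscsi_req, vr_xs) serves as
		 * both the map size here and the load length below; the
		 * remaining fields are never seen by the device.
		 */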
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, SEG_MAX,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r);
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}