xref: /openbsd-src/sys/dev/pv/vioscsi.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*	$OpenBSD: vioscsi.c,v 1.26 2020/09/22 19:32:53 krw Exp $	*/
2 /*
3  * Copyright (c) 2013 Google Inc.
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/mutex.h>
22 
23 #include <machine/bus.h>
24 #include <machine/intr.h>
25 
26 #include <dev/pv/vioscsireg.h>
27 #include <dev/pv/virtiovar.h>
28 
29 #include <scsi/scsi_all.h>
30 #include <scsi/scsiconf.h>
31 
/*
 * Compile-time debug switch: DPRINTF() bodies are type-checked but
 * optimized away entirely when vioscsi_debug is 0.
 */
enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)
39 
/*
 * Per-command state.  Each request owns a permanently reserved
 * descriptor chain in the request virtqueue (vr_qe_index) and two DMA
 * maps: vr_control covering the leading vr_req/vr_res headers, and
 * vr_data for the transfer's data buffer (see vioscsi_alloc_reqs()).
 */
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;	/* header read by the device */
	struct virtio_scsi_res_hdr	 vr_res;	/* response written by the device */
	struct scsi_xfer		*vr_xs;		/* in-flight xfer; NULL when idle */
	bus_dmamap_t			 vr_control;	/* map over vr_req + vr_res */
	bus_dmamap_t			 vr_data;	/* map for xs->data */
	SLIST_ENTRY(vioscsi_req)	 vr_list;	/* linkage on sc_freelist */
	int				 vr_qe_index;	/* fixed virtqueue slot */
};
49 
/*
 * Per-adapter state.  sc_reqs is one DMA-able allocation holding all
 * vioscsi_req structures; idle requests sit on sc_freelist (protected
 * by sc_vr_mtx) and are handed out through sc_iopool.
 */
struct vioscsi_softc {
	struct device		 sc_dev;
	struct scsi_iopool	 sc_iopool;	/* feeds vioscsi_req_get/put */
	struct mutex		 sc_vr_mtx;	/* protects sc_freelist */

	struct virtqueue	 sc_vqs[3];	/* control, event, request */
	struct vioscsi_req	*sc_reqs;	/* array backing all requests */
	bus_dma_segment_t        sc_reqs_segs[1];	/* DMA segment for sc_reqs */
	SLIST_HEAD(, vioscsi_req) sc_freelist;	/* idle requests */
};
60 
/* autoconf glue */
int		 vioscsi_match(struct device *, void *, void *);
void		 vioscsi_attach(struct device *, struct device *, void *);

/* request setup, submission and completion */
int		 vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
void		 vioscsi_scsi_cmd(struct scsi_xfer *);
int		 vioscsi_vq_done(struct virtqueue *);
void		 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
/* scsi_iopool backends */
void		*vioscsi_req_get(void *);
void		 vioscsi_req_put(void *, void *);

struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

struct cfdriver vioscsi_cd = {
	NULL,
	"vioscsi",
	DV_DULL,
};

/* Only command submission is provided; no ioctl or other hooks. */
struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd, NULL, NULL, NULL, NULL
};

/* Virtqueue names, indexed like sc_vqs[] (request queue is index 2). */
const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};
94 
95 int
96 vioscsi_match(struct device *parent, void *self, void *aux)
97 {
98 	struct virtio_softc *va = (struct virtio_softc *)aux;
99 
100 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
101 		return (1);
102 	return (0);
103 }
104 
/*
 * Attach: read device configuration, allocate the three virtqueues,
 * pre-reserve request slots/DMA maps, and hand the adapter to the SCSI
 * midlayer.  Any failure marks the virtio child as errored.
 */
void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	/* No optional virtio feature bits are requested. */
	virtio_negotiate_features(vsc, NULL);
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	/* The device must accept at least SEG_MAX data segments per I/O. */
	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %d too small\n", seg_max);
		goto err;
	}

	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    ALLOC_SEGS, vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	/* The request queue's size bounds the number of in-flight commands. */
	int qsize = sc->sc_vqs[2].vq_num;
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	int nreqs = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}

	saa.saa_adapter = &vioscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = max_target;
	saa.saa_luns = 8;
	/* Never advertise more openings than the device's per-LUN limit. */
	saa.saa_openings = (nreqs > cmd_per_lun) ? cmd_per_lun : nreqs;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}
178 
/*
 * Submit one SCSI command on the request virtqueue.  xs->io is a
 * vioscsi_req reserved through the iopool; its virtqueue slot is fixed
 * (vr_qe_index).  Build the virtio-scsi request header, load/sync the
 * DMA maps, enqueue header + data + response descriptors and notify
 * the device.  SCSI_POLL transfers spin on virtio_poll_intr() with a
 * roughly one-second timeout.
 */
void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr->vr_qe_index;

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	/* 0x40 tags a single-level LUN; high 6 LUN bits ride alongside. */
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);

	/* CDB must fit in the fixed-size header field; zero-pad the rest. */
	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, &xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

	/* Two segments for the request and response headers ... */
	int nsegs = 2;
	/* ... plus however many the data buffer maps to, if any. */
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/*
	 * Descriptor order: request header, data-out (if any), response
	 * header, data-in (if any) — device-readable segments first.
	 * splbio() stays raised through enqueue/commit and, for the
	 * SCSI_POLL case, through the whole polling loop below.
	 */
	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
            sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
		do {
			virtio_poll_intr(vsc);
			/* vioscsi_req_done() clears vr_xs on completion. */
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}
293 
/*
 * Complete one request: finish the DMA transactions started in
 * vioscsi_scsi_cmd(), translate the virtio-scsi response into
 * scsi_xfer status/sense/resid, and hand the xfer back to the
 * midlayer via scsi_done().
 */
void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	/* Any transport-level failure is reported as a driver stuffup. */
	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

	/* Copy back sense data; its presence alone decides XS_SENSE. */
	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	/* Clearing vr_xs is what the SCSI_POLL loop watches for. */
	vr->vr_xs = NULL;
	scsi_done(xs);
}
337 
338 int
339 vioscsi_vq_done(struct virtqueue *vq)
340 {
341 	struct virtio_softc *vsc = vq->vq_owner;
342 	struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
343 	struct vq_entry *qe;
344 	struct vioscsi_req *vr;
345 	int ret = 0;
346 
347 	DPRINTF("vioscsi_vq_done: enter\n");
348 
349 	for (;;) {
350 		int r, s, slot;
351 		s = splbio();
352 		r = virtio_dequeue(vsc, vq, &slot, NULL);
353 		splx(s);
354 		if (r != 0)
355 			break;
356 
357 		DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
358 		qe = &vq->vq_entries[slot];
359 		vr = &sc->sc_reqs[qe->qe_vr_index];
360 		vioscsi_req_done(sc, vsc, vr);
361 		ret = 1;
362 	}
363 
364 	DPRINTF("vioscsi_vq_done: exit %d\n", ret);
365 
366 	return (ret);
367 }
368 
/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
380 void *
381 vioscsi_req_get(void *cookie)
382 {
383 	struct vioscsi_softc *sc = cookie;
384 	struct vioscsi_req *vr = NULL;
385 
386 	mtx_enter(&sc->sc_vr_mtx);
387 	vr = SLIST_FIRST(&sc->sc_freelist);
388 	if (vr != NULL)
389 		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
390 	mtx_leave(&sc->sc_vr_mtx);
391 
392 	DPRINTF("vioscsi_req_get: %p\n", vr);
393 
394 	return (vr);
395 }
396 
397 void
398 vioscsi_req_put(void *cookie, void *io)
399 {
400 	struct vioscsi_softc *sc = cookie;
401 	struct vioscsi_req *vr = io;
402 
403 	DPRINTF("vioscsi_req_put: %p\n", vr);
404 
405 	mtx_enter(&sc->sc_vr_mtx);
406 	/*
407 	 * Do *NOT* call virtio_dequeue_commit()!
408 	 *
409 	 * Descriptors are permanently associated with the vioscsi_req and
410 	 * should not be placed on the free list!
411 	 */
412 	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
413 	mtx_leave(&sc->sc_vr_mtx);
414 }
415 
/*
 * Allocate DMA memory for all vioscsi_req structures, permanently
 * reserve a descriptor chain in the request virtqueue for each one,
 * create/load the DMA maps, and populate the free list.
 *
 * Returns the number of fully set-up requests; 0 means total failure.
 * NOTE(review): on a mid-loop failure the function returns early
 * without unwinding the failing iteration's resources — confirm this
 * is acceptable since attach treats only nreqs == 0 as fatal.
 */
int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

	/*
	 * With indirect descriptors each request uses one ring slot;
	 * otherwise each request consumes ALLOC_SEGS direct descriptors
	 * out of the qsize available.
	 */
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zd, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			/*
			 * NOTE(review): the last reserved descriptor is
			 * required to have next == 0 — presumably the end
			 * of the ring's free chain; verify against
			 * virtio_enqueue_reserve().
			 */
			if (r == (ALLOC_SEGS -1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		/* Tie this request to its slot, and the slot back to it. */
		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;

		/*
		 * vr_control covers only the leading vr_req/vr_res
		 * headers: offsetof(vr_xs) is the byte count up to the
		 * first non-DMA member.
		 */
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error  %d\n", r);
			return i;
		}
		/* Data map sized for the largest possible transfer. */
		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, SEG_MAX,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r );
			return i;
		}
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r );
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}
511