/*	$OpenBSD: vioscsi.c,v 1.13 2019/05/26 15:20:04 sf Exp $	*/
/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pv/vioscsireg.h>
#include <dev/pv/virtiovar.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

enum { vioscsi_debug = 0 };
#define DPRINTF(f...) do { if (vioscsi_debug) printf(f); } while (0)

#define MAX_XFER	MAX(MAXPHYS, MAXBSIZE)
/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAX_XFER/PAGE_SIZE + 1)
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)
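/*
 * Worked example, assuming typical amd64 values of 64 KB MAX_XFER and
 * 4 KB pages: a transfer that is not page aligned can touch
 * MAX_XFER/PAGE_SIZE + 1 = 17 pages, so SEG_MAX is 17 and ALLOC_SEGS
 * is 19 (two extra descriptors for the request and response headers).
 */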
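/*
 * Per-request state. vr_req and vr_res must remain the first two
 * members: vr_control maps exactly these two headers for DMA (see
 * vioscsi_alloc_reqs()), while vr_data is loaded with the caller's
 * buffer for each individual I/O.
 */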
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;
	struct virtio_scsi_res_hdr	 vr_res;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_control;
	bus_dmamap_t			 vr_data;
	SLIST_ENTRY(vioscsi_req)	 vr_list;
	int				 vr_qe_index;
};

struct vioscsi_softc {
	struct device		 sc_dev;
	struct scsi_link	 sc_link;
	struct scsibus		*sc_scsibus;
	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;

	struct virtqueue	 sc_vqs[3];
	struct vioscsi_req	*sc_reqs;
	bus_dma_segment_t	 sc_reqs_segs[1];
	SLIST_HEAD(, vioscsi_req) sc_freelist;
};

int		 vioscsi_match(struct device *, void *, void *);
void		 vioscsi_attach(struct device *, struct device *, void *);

int		 vioscsi_alloc_reqs(struct vioscsi_softc *,
		    struct virtio_softc *, int);
void		 vioscsi_scsi_cmd(struct scsi_xfer *);
int		 vioscsi_vq_done(struct virtqueue *);
void		 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
		    struct vioscsi_req *);
void		*vioscsi_req_get(void *);
void		 vioscsi_req_put(void *, void *);

struct cfattach vioscsi_ca = {
	sizeof(struct vioscsi_softc),
	vioscsi_match,
	vioscsi_attach,
};

struct cfdriver vioscsi_cd = {
	NULL,
	"vioscsi",
	DV_DULL,
};

struct scsi_adapter vioscsi_switch = {
	vioscsi_scsi_cmd,
	scsi_minphys,
};

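/*
 * The virtio-scsi spec fixes the virtqueue order: queue 0 is the
 * control queue, queue 1 the event queue, and queues 2 and up carry
 * requests. This driver allocates a single request queue, which is why
 * sc_vqs[2] is used throughout.
 */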
const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

int
vioscsi_match(struct device *parent, void *self, void *aux)
{
	struct virtio_softc *va = (struct virtio_softc *)aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return (1);
	return (0);
}

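/*
 * Attach: negotiate features, read the device limits from virtio
 * config space, allocate the three virtqueues and the permanently
 * reserved request slots, then hand a scsi_link to the midlayer.
 */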
void
vioscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)self;
	struct scsibus_attach_args saa;
	int i, rv;

	if (vsc->sc_child != NULL) {
		printf(": parent already has a child\n");
		return;
	}
	vsc->sc_child = &sc->sc_dev;
	vsc->sc_ipl = IPL_BIO;

	// TODO(matthew): Negotiate hotplug.

	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = nitems(sc->sc_vqs);

	virtio_negotiate_features(vsc, NULL);
	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);
	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	if (seg_max < SEG_MAX) {
		printf("\nMax number of segments %u too small\n", seg_max);
		goto err;
	}

	for (i = 0; i < nitems(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAX_XFER,
		    ALLOC_SEGS, vioscsi_vq_names[i]);
		if (rv) {
			printf(": failed to allocate virtqueue %d\n", i);
			goto err;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	printf(": qsize %d\n", qsize);

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioscsi_req_get, vioscsi_req_put);

	sc->sc_link.openings = vioscsi_alloc_reqs(sc, vsc, qsize);
	if (sc->sc_link.openings == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	} else if (sc->sc_link.openings > cmd_per_lun)
		sc->sc_link.openings = cmd_per_lun;

	sc->sc_link.adapter = &vioscsi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = max_target;
	sc->sc_link.adapter_buswidth = max_target;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus *)config_found(self, &saa, scsiprint);
	return;

err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

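/*
 * Issue one SCSI command. The descriptor chain follows the virtio-scsi
 * layout: a device-readable request header, then the data-out buffer
 * (if any), then a device-writable response header, then the data-in
 * buffer (if any).
 */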
void
vioscsi_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioscsi_softc *sc = xs->sc_link->adapter_softc;
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_dev.dv_parent;
	struct vioscsi_req *vr = xs->io;
	struct virtio_scsi_req_hdr *req = &vr->vr_req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr->vr_qe_index;

	DPRINTF("vioscsi_scsi_cmd: enter\n");

	// TODO(matthew): Support bidirectional SCSI commands?
	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
	    == (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		goto stuffup;
	}

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
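	/*
	 * For example, target 3, LUN 5 encodes as
	 * { 1, 3, 0x40, 0x05, 0, 0, 0, 0 }.
	 */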
	if (xs->sc_link->target >= 256 || xs->sc_link->lun >= 16384)
		goto stuffup;
	req->lun[0] = 1;
	req->lun[1] = xs->sc_link->target;
	req->lun[2] = 0x40 | (xs->sc_link->lun >> 8);
	req->lun[3] = xs->sc_link->lun;
	memset(req->lun + 4, 0, 4);

	if ((size_t)xs->cmdlen > sizeof(req->cdb))
		goto stuffup;
	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	int isread = !!(xs->flags & SCSI_DATA_IN);

	int nsegs = 2;
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		if (bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
		    xs->data, xs->datalen, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT)))
			goto stuffup;
		nsegs += vr->vr_data->dm_nsegs;
	}

	/*
	 * Adjust the reservation to the number of segments actually
	 * needed, or virtio gets upset. Note that it may trim UP if 'xs'
	 * is being recycled without getting a new reservation!
	 */
	int s = splbio();
	virtio_enqueue_trim(vq, slot, nsegs);
	splx(s);

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	s = splbio();
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    1);
	if (xs->flags & SCSI_DATA_OUT)
		virtio_enqueue(vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    0);
	if (xs->flags & SCSI_DATA_IN)
		virtio_enqueue(vq, slot, vr->vr_data, 0);

	virtio_enqueue_commit(vsc, vq, slot, 1);

	if (ISSET(xs->flags, SCSI_POLL)) {
		DPRINTF("vioscsi_scsi_cmd: polling...\n");
		int timeout = 1000;
		do {
			virtio_poll_intr(vsc);
			if (vr->vr_xs != xs)
				break;
			delay(1000);
		} while (--timeout > 0);
		if (vr->vr_xs == xs) {
			// TODO(matthew): Abort the request.
			xs->error = XS_TIMEOUT;
			xs->resid = xs->datalen;
			DPRINTF("vioscsi_scsi_cmd: polling timeout\n");
			scsi_done(xs);
		}
		DPRINTF("vioscsi_scsi_cmd: done (timeout=%d)\n", timeout);
	}
	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen;
	DPRINTF("vioscsi_scsi_cmd: stuffup\n");
	scsi_done(xs);
}

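/*
 * Completion path for a single request: sync the DMA maps, unload the
 * data map, and translate the virtio-scsi response into the scsi_xfer's
 * error, status, sense and residual fields before calling scsi_done().
 */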
void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsi_xfer *xs = vr->vr_xs;
	DPRINTF("vioscsi_req_done: enter vr: %p xs: %p\n", vr, xs);

	int isread = !!(xs->flags & SCSI_DATA_IN);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
	}

	if (vr->vr_res.response != VIRTIO_SCSI_S_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		DPRINTF("vioscsi_req_done: stuffup: %d\n", vr->vr_res.response);
		goto done;
	}

	size_t sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
	memcpy(&xs->sense, vr->vr_res.sense, sense_len);
	xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF("vioscsi_req_done: done %d, %d, %zd\n",
	    xs->error, xs->status, xs->resid);

done:
	vr->vr_xs = NULL;
	scsi_done(xs);
}

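/*
 * Virtqueue completion handler, called from the virtio interrupt (or
 * polling) path: drain every finished slot and dispatch each to
 * vioscsi_req_done(). Returns nonzero if any request completed.
 */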
int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = (struct vioscsi_softc *)vsc->sc_child;
	struct vq_entry *qe;
	struct vioscsi_req *vr;
	int ret = 0;

	DPRINTF("vioscsi_vq_done: enter\n");

	for (;;) {
		int r, s, slot;
		s = splbio();
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		splx(s);
		if (r != 0)
			break;

		DPRINTF("vioscsi_vq_done: slot=%d\n", slot);
		qe = &vq->vq_entries[slot];
		vr = &sc->sc_reqs[qe->qe_vr_index];
		vioscsi_req_done(sc, vsc, vr);
		ret = 1;
	}

	DPRINTF("vioscsi_vq_done: exit %d\n", ret);

	return (ret);
}

/*
 * vioscsi_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time, the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioscsi_scsi_cmd(),
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioscsi_req_get(void *cookie)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DPRINTF("vioscsi_req_get: %p\n", vr);

	return (vr);
}

void
vioscsi_req_put(void *cookie, void *io)
{
	struct vioscsi_softc *sc = cookie;
	struct vioscsi_req *vr = io;

	DPRINTF("vioscsi_req_put: %p\n", vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the vioscsi_req and
	 * must not be returned to the virtqueue's free descriptor list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

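/*
 * Carve the request pool out of one DMA-safe allocation and permanently
 * reserve a descriptor chain of ALLOC_SEGS slots for each request.
 * Returns the number of requests actually set up; this becomes the
 * adapter's openings count.
 */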
int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize)
{
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	struct vring_desc *vd;
	size_t allocsize;
	int i, r, nreqs, rsegs, slot;
	void *vaddr;

	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = nreqs * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_alloc, size %zu, error %d\n",
		    allocsize, r);
		return 0;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("bus_dmamem_map failed, error %d\n", r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 0;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);

	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DPRINTF("vd[%d].next = %d should be %d\n",
				    r, vd[r].next, (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DPRINTF("Reserved slots are contiguous as required!\n");
		}

		vr->vr_qe_index = slot;
		vr->vr_req.id = slot;
		vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
		vq->vq_entries[slot].qe_vr_index = i;
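		/*
		 * vr_control covers only the two headers at the front of
		 * struct vioscsi_req, so offsetof(vr_xs) serves as both
		 * the size and the maximum segment size of the map.
		 */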
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			printf("bus_dmamap_create vr_control failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_create(vsc->sc_dmat, MAX_XFER, SEG_MAX,
		    MAX_XFER, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			printf("bus_dmamap_create vr_data failed, error %d\n", r);
			return i;
		}
		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("bus_dmamap_load vr_control failed, error %d\n", r);
			return i;
		}

		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}

	return nreqs;
}