xref: /netbsd-src/sys/dev/pci/vioscsi.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2013 Google Inc.
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/cdefs.h>
20 __KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.6 2015/11/01 08:55:05 pooka Exp $");
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/device.h>
25 #include <sys/bus.h>
26 #include <sys/buf.h>
27 
28 #include <dev/pci/pcidevs.h>
29 #include <dev/pci/pcireg.h>
30 #include <dev/pci/pcivar.h>
31 
32 #include <dev/pci/vioscsireg.h>
33 #include <dev/pci/virtiovar.h>
34 
35 #include <dev/scsipi/scsi_all.h>
36 #include <dev/scsipi/scsiconf.h>
37 
/*
 * Debug printf wrapper: compiled in only under VIOSCSI_DEBUG and then
 * further gated at run time by vioscsi_debug.  Callers pass a doubly
 * parenthesized argument list, e.g. DPRINTF(("fmt", arg)).
 */
#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif
44 
/*
 * Per-command request tracker.  vr_req and vr_res sit at the start of
 * the structure on purpose: vr_control maps this structure's own memory
 * and the enqueue/sync code addresses the two headers via offsetof()
 * (see vioscsi_scsipi_request() and vioscsi_req_get()).
 */
struct vioscsi_req {
	struct virtio_scsi_req_hdr	 vr_req;	/* request header, read by device */
	struct virtio_scsi_res_hdr	 vr_res;	/* response header, written by device */
	struct scsipi_xfer		*vr_xs;		/* in-flight xfer; NULL when idle */
	bus_dmamap_t			 vr_control;	/* DMA map covering vr_req + vr_res */
	bus_dmamap_t			 vr_data;	/* DMA map for the xfer's data buffer */
};
52 
/*
 * Per-instance driver state.  The three virtqueues follow the
 * virtio-scsi layout: [0] control, [1] event, [2] request (see
 * vioscsi_vq_names below); all I/O goes through sc_vqs[2].
 */
struct vioscsi_softc {
	device_t 		 sc_dev;
	struct scsipi_adapter	 sc_adapter;	/* registered with scsipi midlayer */
	struct scsipi_channel 	 sc_channel;

	struct virtqueue	 sc_vqs[3];	/* control / event / request */
	struct vioscsi_req	*sc_reqs;	/* request pool, one per vq slot */
	bus_dma_segment_t        sc_reqs_segs[1];	/* backing DMA segment for sc_reqs */

	u_int32_t		 sc_seg_max;	/* device's SEG_MAX config value */
};
64 
/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2
70 
/* Forward declarations; all functions are file-local. */
static int	 vioscsi_match(device_t, cfdata_t, void *);
static void	 vioscsi_attach(device_t, device_t, void *);

static int	 vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void	 vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int	 vioscsi_vq_done(struct virtqueue *);
static void	 vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void	 vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);
83 
/* Virtqueue names, indexed to match the virtio-scsi queue order. */
static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};
89 
/* Autoconf attachment; no detach or activate support. */
CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);
92 
93 static int
94 vioscsi_match(device_t parent, cfdata_t match, void *aux)
95 {
96 	struct virtio_softc *va = aux;
97 
98 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
99 		return 1;
100 	return 0;
101 }
102 
/*
 * Attach: claim the virtio parent, negotiate features, read the device
 * configuration, allocate the three virtqueues and the request pool,
 * then register a scsipi channel with the midlayer.
 */
static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	/* A virtio device supports exactly one child driver. */
	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	/* Wire ourselves into the parent virtio device. */
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	/* No optional virtio-scsi features are requested (guest mask 0). */
	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	/* Remembered for sizing the per-request data DMA maps. */
	sc->sc_seg_max = seg_max;

	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			/*
			 * NOTE(review): on failure vsc->sc_child stays set
			 * and previously allocated virtqueues are not freed,
			 * leaving the device half-configured -- confirm this
			 * is acceptable for an autoconf error path.
			 */
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	/* sc_vqs[2] is the request queue; its depth bounds in-flight cmds. */
	int qsize = sc->sc_vqs[2].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	/*
	 * Fill in the scsipi_adapter.
	 *
	 * NOTE(review): the virtio-scsi config fields are maxima (largest
	 * valid number) while scsipi expects counts; using max_channel,
	 * max_target and max_lun directly looks off by one -- verify
	 * against the virtio-scsi spec and scsipi expectations.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}
193 
/*
 * Map scsipi xfer control flags onto bus_dma(9) flags:
 * XS2DMA for bus_dmamap_load(), XS2DMAPRE/XS2DMAPOST for the
 * pre-/post-transfer bus_dmamap_sync() of the data buffer.
 */
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)
204 
/*
 * scsipi midlayer entry point.  Only ADAPTER_REQ_RUN_XFER is handled:
 * build a virtio-scsi request from the xfer, load and sync the DMA
 * maps, enqueue header/data/response descriptors on the request queue
 * and kick the device.  For polled (XS_CTL_POLL) transfers, spin on the
 * interrupt handler until completion or a ~1s timeout.
 */
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];	/* request queue */
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	/* GROW_RESOURCES / SET_XFER_MODE are not supported. */
	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	/* The request's pool index doubles as its virtqueue slot. */
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	/*
	 * NOTE(review): scsipi target N is mapped to virtio target N-1
	 * here; presumably target 0 is reserved by the midlayer for the
	 * adapter itself -- confirm, since periph_target == 0 would
	 * wrap to 255.
	 */
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);	/* flat LUN addressing */
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	/* CDBs longer than the virtio-scsi fixed CDB field cannot be sent. */
	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	/*
	 * NOTE(review): this loads the data map even for transfers with
	 * no data phase (datalen 0, data possibly NULL) -- verify
	 * bus_dmamap_load() tolerates that on all platforms.
	 */
	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		/* Transient shortage: let the midlayer retry later. */
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		// XXX: free req?  (the slot and any loaded data map leak here)
		scsipi_done(xs);
		return;
	}

	/* Header + response descriptors, plus one per data segment. */
	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	/* Device reads vr_req, writes vr_res: sync accordingly. */
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	/*
	 * Descriptor order mandated by virtio-scsi: request header,
	 * device-readable data (OUT), response header, device-writable
	 * data (IN).  The final argument marks write(1)/read(0) from the
	 * device's perspective.
	 */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
            sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	/* Busy-wait up to 1000 * 1ms; vr_xs is cleared by vioscsi_req_done(). */
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!  (command is still owned by the device)
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}
348 
/*
 * Complete one request the device has handed back: sync the DMA maps,
 * translate the virtio-scsi response into scsipi error/sense state,
 * release the request and notify the midlayer.
 */
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	/* Mirror the PRE syncs done at submit time. */
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	/*
	 * NOTE(review): unlike the submit path, this sync is done even
	 * when the xfer had no data phase -- presumably harmless with
	 * datalen 0, but asymmetric; confirm.
	 */
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		/* Copy back any autosense data the device returned. */
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		/* Synthesize ILLEGAL REQUEST sense for a nonexistent target. */
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;	/* current errors, fixed format */
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	/*
	 * NOTE(review): these unconditional assignments overwrite the
	 * status/resid set in the BAD_TARGET and default cases above --
	 * verify whether that is intended.
	 */
	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}
402 
403 static int
404 vioscsi_vq_done(struct virtqueue *vq)
405 {
406 	struct virtio_softc *vsc = vq->vq_owner;
407 	struct vioscsi_softc *sc = device_private(vsc->sc_child);
408 	int ret = 0;
409 
410 	DPRINTF(("%s: enter\n", __func__));
411 
412 	for (;;) {
413 		int r, slot;
414 		r = virtio_dequeue(vsc, vq, &slot, NULL);
415 		if (r != 0)
416 			break;
417 
418 		DPRINTF(("%s: slot=%d\n", __func__, slot));
419 		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
420 		ret = 1;
421 	}
422 
423 	DPRINTF(("%s: exit %d\n", __func__, ret));
424 
425 	return ret;
426 }
427 
/*
 * Claim a free virtqueue slot and prepare its request tracker:
 * create the control and data DMA maps and preload the control map
 * over this tracker's own memory.  Returns NULL (with the slot
 * aborted and any maps destroyed) on failure.
 */
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];	/* request queue */
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_get error %d\n", __func__, r));
		goto err1;
	}
	/* Slot number indexes directly into the preallocated pool. */
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

	/*
	 * offsetof(..., vr_xs) is the combined size of vr_req + vr_res,
	 * i.e. the device-visible prefix of the tracker -- the control
	 * map covers exactly that region.
	 */
	r = bus_dmamap_create(vsc->sc_dmat,
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create xs error %d\n", __func__, r));
		goto err2;
	}
	/* Data map sized for a full MAXPHYS transfer in sc_seg_max pieces. */
	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
		goto err3;
	}
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create ctrl error %d\n", __func__, r));
		goto err4;
	}

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;

	/* Unwind in reverse order of acquisition. */
err4:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
err3:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
err2:
	virtio_enqueue_abort(vsc, vq, slot);
err1:
	return NULL;
}
480 
481 static void
482 vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
483 {
484 	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
485 	struct virtqueue *vq = &sc->sc_vqs[2];
486 	int slot = vr - sc->sc_reqs;
487 
488 	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));
489 
490 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
491 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
492 
493 	virtio_dequeue_commit(vsc, vq, slot);
494 }
495 
496 int
497 vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
498     int qsize, uint32_t seg_max)
499 {
500 	size_t allocsize;
501 	int r, rsegs;
502 	void *vaddr;
503 
504 	allocsize = qsize * sizeof(struct vioscsi_req);
505 	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
506 	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
507 	if (r != 0) {
508 		aprint_error_dev(sc->sc_dev,
509 		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
510 		    allocsize, r);
511 		return 1;
512 	}
513 	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
514 	    allocsize, &vaddr, BUS_DMA_NOWAIT);
515 	if (r != 0) {
516 		aprint_error_dev(sc->sc_dev,
517 		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
518 		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
519 		return 1;
520 	}
521 	sc->sc_reqs = vaddr;
522 	memset(vaddr, 0, allocsize);
523 	return 0;
524 }
525