1 /*	$NetBSD: twe.c,v 1.13 2001/03/07 23:07:17 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*-
40  * Copyright (c) 2000 Michael Smith
41  * Copyright (c) 2000 BSDi
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
66  */
67 
68 /*
69  * Driver for the 3ware Escalade family of RAID controllers.
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/device.h>
76 #include <sys/queue.h>
77 #include <sys/proc.h>
78 #include <sys/buf.h>
79 #include <sys/endian.h>
80 #include <sys/malloc.h>
81 #include <sys/disk.h>
82 
83 #include <uvm/uvm_extern.h>
84 
85 #include <machine/bswap.h>
86 #include <machine/bus.h>
87 
88 #include <dev/pci/pcireg.h>
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcidevs.h>
91 #include <dev/pci/twereg.h>
92 #include <dev/pci/twevar.h>
93 
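/*
 * Register access helpers: the driver performs 32-bit accesses to the
 * controller's I/O space, which is mapped at attach time.
 */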
94 #define	TWE_INL(sc, port) \
95     bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, port)
96 #define	TWE_OUTL(sc, port, val) \
97     bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, port, val)
98 
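/* PCI configuration space offset of the controller's I/O base address register. */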
99 #define	PCI_CBIO	0x10
100 
101 static void	twe_aen_handler(struct twe_ccb *, int);
102 static void	twe_attach(struct device *, struct device *, void *);
103 static int	twe_init_connection(struct twe_softc *);
104 static int	twe_intr(void *);
105 static int	twe_match(struct device *, struct cfdata *, void *);
106 static int	twe_param_get(struct twe_softc *, int, int, size_t,
107 			      void (*)(struct twe_ccb *, int), void **);
108 static void	twe_poll(struct twe_softc *);
109 static int	twe_print(void *, const char *);
110 static int	twe_reset(struct twe_softc *);
111 static int	twe_submatch(struct device *, struct cfdata *, void *);
112 static int	twe_status_check(struct twe_softc *, u_int);
113 static int	twe_status_wait(struct twe_softc *, u_int, int);
114 
115 struct cfattach twe_ca = {
116 	sizeof(struct twe_softc), twe_match, twe_attach
117 };
118 
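/*
 * Table mapping AEN codes to descriptions.  twe_aen_handler() matches on
 * TWE_AEN_CODE(); a non-zero high byte marks AENs that carry a unit number.
 */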
119 struct {
120 	const u_int	aen;		/* High byte non-zero if AEN names a unit */
121 	const char	*desc;
122 } static const twe_aen_names[] = {
123 	{ 0x0000, "queue empty" },
124 	{ 0x0001, "soft reset" },
125 	{ 0x0102, "degraded mirror" },
126 	{ 0x0003, "controller error" },
127 	{ 0x0104, "rebuild fail" },
128 	{ 0x0105, "rebuild done" },
129 	{ 0x0106, "incompatible unit" },
130 	{ 0x0107, "init done" },
131 	{ 0x0108, "unclean shutdown" },
132 	{ 0x0109, "aport timeout" },
133 	{ 0x010a, "drive error" },
134 	{ 0x010b, "rebuild started" },
135 	{ 0x0015, "table undefined" },
136 	{ 0x00ff, "aen queue full" },
137 };
138 
139 /*
140  * Match a supported board.
141  */
142 static int
143 twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
144 {
145 	struct pci_attach_args *pa;
146 
147 	pa = aux;
148 
149 	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
150 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
151 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
152 }
153 
154 /*
155  * Attach a supported board.
156  *
157  * XXX This doesn't fail gracefully.
158  */
159 static void
160 twe_attach(struct device *parent, struct device *self, void *aux)
161 {
162 	struct pci_attach_args *pa;
163 	struct twe_softc *sc;
164 	pci_chipset_tag_t pc;
165 	pci_intr_handle_t ih;
166 	pcireg_t csr;
167 	const char *intrstr;
168 	int size, i, rv, rseg;
169 	struct twe_param *dtp, *ctp;
170 	bus_dma_segment_t seg;
171 	struct twe_cmd *tc;
172 	struct twe_attach_args twea;
173 	struct twe_ccb *ccb;
174 
175 	sc = (struct twe_softc *)self;
176 	pa = aux;
177 	pc = pa->pa_pc;
178 	sc->sc_dmat = pa->pa_dmat;
179 	SIMPLEQ_INIT(&sc->sc_ccb_queue);
180 	SLIST_INIT(&sc->sc_ccb_freelist);
181 
182 	printf(": 3ware Escalade\n");
183 
184 	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
185 	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
186 		printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
187 		return;
188 	}
189 
190 	/* Enable the device. */
191 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
192 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
193 	    csr | PCI_COMMAND_MASTER_ENABLE);
194 
195 	/* Map and establish the interrupt. */
196 	if (pci_intr_map(pa, &ih)) {
197 		printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
198 		return;
199 	}
200 	intrstr = pci_intr_string(pc, ih);
201 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
202 	if (sc->sc_ih == NULL) {
203 		printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
204 		if (intrstr != NULL)
205 			printf(" at %s", intrstr);
206 		printf("\n");
207 		return;
208 	}
209 	if (intrstr != NULL)
210 		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);
211 
212 	/*
213 	 * Allocate and initialise the command blocks and CCBs.
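	 * All command blocks live in a single DMA-safe buffer; each CCB gets
	 * its own transfer map, created with BUS_DMA_ALLOCNOW so that mapping
	 * a transfer later cannot fail for lack of resources.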
214 	 */
215 	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;
216 
217 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
218 	    &rseg, BUS_DMA_NOWAIT)) != 0) {
219 		printf("%s: unable to allocate commands, rv = %d\n",
220 		    sc->sc_dv.dv_xname, rv);
221 		return;
222 	}
223 
224 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
225 	    (caddr_t *)&sc->sc_cmds,
226 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
227 		printf("%s: unable to map commands, rv = %d\n",
228 		    sc->sc_dv.dv_xname, rv);
229 		return;
230 	}
231 
232 	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
233 	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
234 		printf("%s: unable to create command DMA map, rv = %d\n",
235 		    sc->sc_dv.dv_xname, rv);
236 		return;
237 	}
238 
239 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
240 	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
241 		printf("%s: unable to load command DMA map, rv = %d\n",
242 		    sc->sc_dv.dv_xname, rv);
243 		return;
244 	}
245 
246 	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
247 	memset(sc->sc_cmds, 0, size);
248 
249 	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	if (ccb == NULL) {
		printf("%s: unable to allocate CCBs\n", sc->sc_dv.dv_xname);
		return;
	}
250 	sc->sc_ccbs = ccb;
251 	tc = (struct twe_cmd *)sc->sc_cmds;
252 
253 	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
254 		ccb->ccb_cmd = tc;
255 		ccb->ccb_cmdid = i;
256 		ccb->ccb_flags = 0;
257 		rv = bus_dmamap_create(sc->sc_dmat, TWE_MAX_XFER,
258 		    TWE_MAX_SEGS, PAGE_SIZE, 0,
259 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
260 		    &ccb->ccb_dmamap_xfer);
261 		if (rv != 0) {
262 			printf("%s: can't create dmamap, rv = %d\n",
263 			    sc->sc_dv.dv_xname, rv);
264 			return;
265 		}
266 		/* Save one CCB for parameter retrieval. */
267 		if (i != 0)
268 			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
269 			    ccb_chain.slist);
270 	}
271 
272 	/* Wait for the controller to become ready. */
273 	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
274 		printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
275 		return;
276 	}
277 
278 	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);
279 
280 	/* Reset the controller. */
281 	if (twe_reset(sc)) {
282 		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
283 		return;
284 	}
285 
286 	/* Find attached units. */
287 	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
288 	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, (void **)&dtp);
289 	if (rv != 0) {
290 		printf("%s: can't detect attached units (%d)\n",
291 		    sc->sc_dv.dv_xname, rv);
292 		return;
293 	}
294 
295 	/* For each detected unit, collect size and store in an array. */
296 	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
297 		/* Unit present? */
298 		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
299 			sc->sc_dsize[i] = 0;
300 			continue;
301 		}
302 
303 		rv = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
304 		    TWE_PARAM_UNITINFO_Capacity, 4, NULL, (void **)&ctp);
305 		if (rv != 0) {
306 			printf("%s: error %d fetching capacity for unit %d\n",
307 			    sc->sc_dv.dv_xname, rv, i);
308 			continue;
309 		}
310 
311 		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
312 		free(ctp, M_DEVBUF);
313 		sc->sc_nunits++;
314 	}
315 	free(dtp, M_DEVBUF);
316 
317 	/* Initialise connection with controller and enable interrupts. */
318 	twe_init_connection(sc);
319 	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
320 	    TWE_CTL_UNMASK_RESP_INTR |
321 	    TWE_CTL_ENABLE_INTRS);
322 
323 	/* Attach sub-devices. */
324 	for (i = 0; i < TWE_MAX_UNITS; i++) {
325 		if (sc->sc_dsize[i] == 0)
326 			continue;
327 		twea.twea_unit = i;
328 		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
329 	}
330 }
331 
332 /*
333  * Reset the controller.  Currently only useful at attach time; must be
334  * called with interrupts blocked.
335  */
336 static int
337 twe_reset(struct twe_softc *sc)
338 {
339 	struct twe_param *tp;
340 	u_int aen, status;
341 	volatile u_int32_t junk;
342 	int got, rv;
343 
344 	/* Issue a soft reset. */
345 	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
346 	    TWE_CTL_CLEAR_HOST_INTR |
347 	    TWE_CTL_CLEAR_ATTN_INTR |
348 	    TWE_CTL_MASK_CMD_INTR |
349 	    TWE_CTL_MASK_RESP_INTR |
350 	    TWE_CTL_CLEAR_ERROR_STS |
351 	    TWE_CTL_DISABLE_INTRS);
352 
353 	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
354 		printf("%s: no attention interrupt\n",
355 		    sc->sc_dv.dv_xname);
356 		return (-1);
357 	}
358 
359 	/* Pull AENs out of the controller; look for a soft reset AEN. */
360 	for (got = 0;;) {
361 		rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
362 		    2, NULL, (void **)&tp);
363 		if (rv != 0) {
364 			printf("%s: error %d while draining response queue\n",
365 			    sc->sc_dv.dv_xname, rv);
			return (-1);
		}
366 		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
367 		free(tp, M_DEVBUF);
368 		if (aen == TWE_AEN_QUEUE_EMPTY)
369 			break;
370 		if (aen == TWE_AEN_SOFT_RESET)
371 			got = 1;
372 	}
373 	if (!got) {
374 		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
375 		return (-1);
376 	}
377 
378 	/* Check controller status. */
379 	status = TWE_INL(sc, TWE_REG_STS);
380 	if (twe_status_check(sc, status)) {
381 		printf("%s: controller errors detected\n",
382 		    sc->sc_dv.dv_xname);
383 		return (-1);
384 	}
385 
386 	/* Drain the response queue. */
387 	for (;;) {
388 		status = TWE_INL(sc, TWE_REG_STS);
389 		if (twe_status_check(sc, status) != 0) {
390 			printf("%s: can't drain response queue\n",
391 			    sc->sc_dv.dv_xname);
392 			return (-1);
393 		}
394 		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
395 			break;
396 		junk = TWE_INL(sc, TWE_REG_RESP_QUEUE);
397 	}
398 
399 	return (0);
400 }
401 
402 /*
403  * Print autoconfiguration message for a sub-device.
404  */
405 static int
406 twe_print(void *aux, const char *pnp)
407 {
408 	struct twe_attach_args *twea;
409 
410 	twea = aux;
411 
412 	if (pnp != NULL)
413 		printf("block device at %s", pnp);
414 	printf(" unit %d", twea->twea_unit);
415 	return (UNCONF);
416 }
417 
418 /*
419  * Match a sub-device.
420  */
421 static int
422 twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
423 {
424 	struct twe_attach_args *twea;
425 
426 	twea = aux;
427 
428 	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
429 	    cf->tweacf_unit != twea->twea_unit)
430 		return (0);
431 
432 	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
433 }
434 
435 /*
436  * Interrupt service routine.
437  */
438 static int
439 twe_intr(void *arg)
440 {
441 	struct twe_softc *sc;
442 	u_int status;
443 	int caught, rv;
444 
445 	sc = arg;
446 	caught = 0;
447 	status = TWE_INL(sc, TWE_REG_STS);
448 	twe_status_check(sc, status);
449 
450 	/* Host interrupts - purpose unknown. */
451 	if ((status & TWE_STS_HOST_INTR) != 0) {
452 #ifdef DIAGNOSTIC
453 		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
454 #endif
455 		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
456 		caught = 1;
457 	}
458 
459 	/*
460 	 * Attention interrupts, signalled when a controller or child device
461 	 * state change has occurred.
462 	 */
463 	if ((status & TWE_STS_ATTN_INTR) != 0) {
464 		if ((sc->sc_flags & TWEF_AEN) == 0) {
465 			rv = twe_param_get(sc, TWE_PARAM_AEN,
466 			    TWE_PARAM_AEN_UnitCode, 2, twe_aen_handler,
467 			    NULL);
468 			if (rv != 0) {
469 				printf("%s: unable to retrieve AEN (%d)\n",
470 				    sc->sc_dv.dv_xname, rv);
471 				TWE_OUTL(sc, TWE_REG_CTL,
472 				    TWE_CTL_CLEAR_ATTN_INTR);
473 			} else
474 				sc->sc_flags |= TWEF_AEN;
475 		}
476 		caught = 1;
477 	}
478 
479 	/*
480 	 * Command interrupts, signalled when the controller can accept more
481 	 * commands.  We don't use this; instead, we try to submit commands
482 	 * when we receive them, and when other commands have completed.
483 	 * Mask it so we don't get another one.
484 	 */
485 	if ((status & TWE_STS_CMD_INTR) != 0) {
486 #ifdef DIAGNOSTIC
487 		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
488 #endif
489 		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
490 		caught = 1;
491 	}
492 
493 	if ((status & TWE_STS_RESP_INTR) != 0) {
494 		twe_poll(sc);
495 		caught = 1;
496 	}
497 
498 	return (caught);
499 }
500 
501 /*
502  * Handle an AEN returned by the controller.
503  */
504 static void
505 twe_aen_handler(struct twe_ccb *ccb, int error)
506 {
507 	struct twe_softc *sc;
508 	struct twe_param *tp;
509 	const char *str;
510 	u_int aen;
511 	int i, hu, rv;
512 
513 	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
514 	tp = ccb->ccb_tx.tx_context;
515 	twe_ccb_unmap(sc, ccb);
516 
517 	if (error) {
518 		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
519 		aen = TWE_AEN_QUEUE_EMPTY;
520 	} else
521 		aen = le16toh(*(u_int16_t *)tp->tp_data);
522 	free(tp, M_DEVBUF);
523 	twe_ccb_free(sc, ccb);
524 
525 	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
526 		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
527 		sc->sc_flags &= ~TWEF_AEN;
528 		return;
529 	}
530 
531 	str = "<unknown>";
532 	i = 0;
533 	hu = 0;
534 
535 	while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
536 		if (TWE_AEN_CODE(twe_aen_names[i].aen) == TWE_AEN_CODE(aen)) {
537 			str = twe_aen_names[i].desc;
538 			hu = (TWE_AEN_UNIT(twe_aen_names[i].aen) != 0);
539 			break;
540 		}
541 		i++;
542 	}
543 	printf("%s: AEN 0x%04x (%s) received", sc->sc_dv.dv_xname,
544 	    TWE_AEN_CODE(aen), str);
545 	if (hu != 0)
546 		printf(" for unit %d", TWE_AEN_UNIT(aen));
547 	printf("\n");
548 
549 	/*
550 	 * Chain another retrieval in case interrupts have been
551 	 * coalesced.
552 	 */
553 	rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
554 	    twe_aen_handler, NULL);
555 	if (rv != 0)
556 		printf("%s: unable to retrieve AEN (%d)\n",
557 		    sc->sc_dv.dv_xname, rv);
558 }
559 
560 /*
561  * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
562  * it will be called with generated context when the command has completed.
563  * If no callback is provided, the command will be executed synchronously
564  * and a pointer to the returned data buffer will be stored in *pbuf.
565  *
566  * The caller or callback is responsible for freeing the buffer.
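 *
 * A minimal synchronous usage sketch (mirroring twe_attach()):
 *
 *	struct twe_param *tp;
 *
 *	if (twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
 *	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL,
 *	    (void **)&tp) == 0) {
 *		... examine tp->tp_data ...
 *		free(tp, M_DEVBUF);
 *	}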
567  */
568 static int
569 twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
570 	      void (*func)(struct twe_ccb *, int), void **pbuf)
571 {
572 	struct twe_ccb *ccb;
573 	struct twe_cmd *tc;
574 	struct twe_param *tp;
575 	int rv, s;
576 
577 	rv = twe_ccb_alloc(sc, &ccb,
578 	    TWE_CCB_PARAM | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
579 	if (rv != 0)
580 		return (rv);
581 
582 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL) {
		twe_ccb_free(sc, ccb);
		return (ENOMEM);
	}
583 	if (pbuf != NULL)
584 		*pbuf = tp;
585 
586 	ccb->ccb_data = tp;
587 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
588 	ccb->ccb_tx.tx_handler = func;
589 	ccb->ccb_tx.tx_context = tp;
590 	ccb->ccb_tx.tx_dv = &sc->sc_dv;
591 
592 	tc = ccb->ccb_cmd;
593 	tc->tc_size = 2;
594 	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
595 	tc->tc_unit = 0;
596 	tc->tc_count = htole16(1);
597 
598 	/* Fill in the outbound parameter data. */
599 	tp->tp_table_id = htole16(table_id);
600 	tp->tp_param_id = param_id;
601 	tp->tp_param_size = size;
602 
603 	/* Map the transfer. */
604 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
605 		twe_ccb_free(sc, ccb);
606 		free(tp, M_DEVBUF);
607 		return (rv);
608 	}
609 
610 	/* Submit the command and either wait or let the callback handle it. */
611 	if (func == NULL) {
612 		s = splbio();
613 		rv = twe_ccb_poll(sc, ccb, 5);
614 		twe_ccb_unmap(sc, ccb);
615 		twe_ccb_free(sc, ccb);
616 		splx(s);
617 		if (rv != 0)
618 			free(tp, M_DEVBUF);
619 	} else {
620 		twe_ccb_enqueue(sc, ccb);
621 		rv = 0;
622 	}
623 
624 	return (rv);
625 }
626 
627 /*
628  * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
629  * Must be called with interrupts blocked.
630  */
631 static int
632 twe_init_connection(struct twe_softc *sc)
633 {
634 	struct twe_ccb *ccb;
635 	struct twe_cmd *tc;
636 	int rv;
637 
638 	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
639 		return (rv);
640 
641 	/* Build the command. */
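	/*
	 * TWE_MAX_CMDS presumably tells the controller how many commands
	 * the host may have outstanding; the response queue pointer is
	 * left at zero.
	 */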
642 	tc = ccb->ccb_cmd;
643 	tc->tc_size = 3;
644 	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
645 	tc->tc_unit = 0;
646 	tc->tc_count = htole16(TWE_MAX_CMDS);
647 	tc->tc_args.init_connection.response_queue_pointer = 0;
648 
649 	/* Submit the command for immediate execution. */
650 	rv = twe_ccb_poll(sc, ccb, 5);
651 	twe_ccb_free(sc, ccb);
652 	return (rv);
653 }
654 
655 /*
656  * Poll the controller for completed commands.  Must be called with
657  * interrupts blocked.
658  */
659 static void
660 twe_poll(struct twe_softc *sc)
661 {
662 	struct twe_ccb *ccb;
663 	int found;
664 	u_int status, cmdid;
665 
666 	found = 0;
667 
668 	for (;;) {
669 		status = TWE_INL(sc, TWE_REG_STS);
670 		twe_status_check(sc, status);
671 
672 		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
673 			break;
674 
675 		found = 1;
676 		cmdid = TWE_INL(sc, TWE_REG_RESP_QUEUE);
677 		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
678 		if (cmdid >= TWE_MAX_QUEUECNT) {
679 			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
680 			continue;
681 		}
682 
683 		ccb = sc->sc_ccbs + cmdid;
684 		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
685 			printf("%s: bad completion (not active)\n",
686 			    sc->sc_dv.dv_xname);
687 			continue;
688 		}
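		/* Clear ACTIVE and set COMPLETE in a single operation. */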
689 		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;
690 
691 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
692 		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
693 		    sizeof(struct twe_cmd),
694 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
695 
696 		/* Pass notification to upper layers. */
697 		if (ccb->ccb_tx.tx_handler != NULL)
698 			(*ccb->ccb_tx.tx_handler)(ccb,
699 			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
700 	}
701 
702 	/* If any commands have completed, run the software queue. */
703 	if (found)
704 		twe_ccb_enqueue(sc, NULL);
705 }
706 
707 /*
708  * Wait for `status' to be set in the controller status register.  Return
709  * zero if found, non-zero if the operation timed out.
710  */
711 static int
712 twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
713 {
714 
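	/* `timo' is in seconds; poll the status register every 100ms. */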
715 	for (timo *= 10; timo != 0; timo--) {
716 		if ((TWE_INL(sc, TWE_REG_STS) & status) == status)
717 			break;
718 		delay(100000);
719 	}
720 
721 	return (timo == 0);
722 }
723 
724 /*
725  * Complain if the status bits aren't what we expect.
726  */
727 static int
728 twe_status_check(struct twe_softc *sc, u_int status)
729 {
730 	int rv;
731 
732 	rv = 0;
733 
734 	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
735 		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
736 		    status & ~TWE_STS_EXPECTED_BITS);
737 		rv = -1;
738 	}
739 
740 	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
741 		printf("%s: unexpected status bits: 0x%08x\n",
742 		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
743 		rv = -1;
744 	}
745 
746 	return (rv);
747 }
748 
749 /*
750  * Allocate and initialise a CCB.
751  */
752 int
753 twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
754 {
755 	struct twe_cmd *tc;
756 	struct twe_ccb *ccb;
757 	int s;
758 
759 	s = splbio();
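	/* CCB 0 is held back at attach time for parameter retrieval. */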
760 	if ((flags & TWE_CCB_PARAM) != 0)
761 		ccb = sc->sc_ccbs;
762 	else {
763 		/* Allocate a CCB and command block. */
764 		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
765 			splx(s);
766 			return (EAGAIN);
767 		}
768 		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
769 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
770 	}
771 #ifdef DIAGNOSTIC
772 	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
773 		panic("twe_ccb_alloc: CCB already allocated");
774 	flags |= TWE_CCB_ALLOCED;
775 #endif
776 	splx(s);
777 
778 	/* Initialise some fields and return. */
779 	ccb->ccb_tx.tx_handler = NULL;
780 	ccb->ccb_flags = flags;
781 	tc = ccb->ccb_cmd;
782 	tc->tc_status = 0;
783 	tc->tc_flags = 0;
784 	tc->tc_cmdid = ccb->ccb_cmdid;
785 	*ccbp = ccb;
786 
787 	return (0);
788 }
789 
790 /*
791  * Free a CCB.
792  */
793 void
794 twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
795 {
796 	int s;
797 
798 	s = splbio();
799 	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
800 		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
801 	ccb->ccb_flags = 0;
802 	splx(s);
803 }
804 
805 /*
806  * Map the specified CCB's command block and data buffer (if any) into
807  * controller visible space.  Perform DMA synchronisation.
808  */
809 int
810 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
811 {
812 	struct twe_cmd *tc;
813 	int flags, nsegs, i, s, rv;
814 	void *data;
815 
816 	/*
817 	 * The data as a whole must be 512-byte aligned.
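	 * If it is not, bounce it through a temporary kernel buffer: data
	 * going out is copied here, and data coming in is copied back in
	 * twe_ccb_unmap().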
818 	 */
819 	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
820 		s = splvm();
821 		/* XXX */
822 		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
823 		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
824 		splx(s);
825 		data = (void *)ccb->ccb_abuf;
826 		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
827 			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
828 	} else {
829 		ccb->ccb_abuf = (vaddr_t)0;
830 		data = ccb->ccb_data;
831 	}
832 
833 	/*
834 	 * Map the data buffer into bus space and build the S/G list.
835 	 */
836 	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
837 	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
838 	if (rv != 0) {
839 		if (ccb->ccb_abuf != (vaddr_t)0) {
840 			s = splvm();
841 			/* XXX */
842 			uvm_km_free(kmem_map, ccb->ccb_abuf,
843 			    ccb->ccb_datasize);
844 			splx(s);
845 		}
846 		return (rv);
847 	}
848 
849 	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
850 	tc = ccb->ccb_cmd;
851 	tc->tc_size += 2 * nsegs;
852 
853 	/* The location of the S/G list is dependent upon command type. */
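	/*
	 * The upper three bits of the opcode byte hold the offset of the
	 * S/G list within the command packet (2 for parameter commands,
	 * 3 for I/O commands), which is presumably why they can be used
	 * here to select the SGL layout.
	 */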
854 	switch (tc->tc_opcode >> 5) {
855 	case 2:
856 		for (i = 0; i < nsegs; i++) {
857 			tc->tc_args.param.sgl[i].tsg_address =
858 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
859 			tc->tc_args.param.sgl[i].tsg_length =
860 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
861 		}
862 		/* XXX Needed? */
863 		for (; i < TWE_SG_SIZE; i++) {
864 			tc->tc_args.param.sgl[i].tsg_address = 0;
865 			tc->tc_args.param.sgl[i].tsg_length = 0;
866 		}
867 		break;
868 	case 3:
869 		for (i = 0; i < nsegs; i++) {
870 			tc->tc_args.io.sgl[i].tsg_address =
871 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
872 			tc->tc_args.io.sgl[i].tsg_length =
873 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
874 		}
875 		/* XXX Needed? */
876 		for (; i < TWE_SG_SIZE; i++) {
877 			tc->tc_args.io.sgl[i].tsg_address = 0;
878 			tc->tc_args.io.sgl[i].tsg_length = 0;
879 		}
880 		break;
881 #ifdef DEBUG
882 	default:
883 		panic("twe_ccb_map: oops");
884 #endif
885 	}
886 
887 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
888 		flags = BUS_DMASYNC_PREREAD;
889 	else
890 		flags = 0;
891 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
892 		flags |= BUS_DMASYNC_PREWRITE;
893 
894 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
895 	    ccb->ccb_datasize, flags);
896 	return (0);
897 }
898 
899 /*
900  * Unmap the specified CCB's command block and data buffer (if any) and
901  * perform DMA synchronisation.
902  */
903 void
904 twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
905 {
906 	int flags, s;
907 
908 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
909 		flags = BUS_DMASYNC_POSTREAD;
910 	else
911 		flags = 0;
912 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
913 		flags |= BUS_DMASYNC_POSTWRITE;
914 
915 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
916 	    ccb->ccb_datasize, flags);
917 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
918 
919 	if (ccb->ccb_abuf != (vaddr_t)0) {
920 		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
921 			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
922 			    ccb->ccb_datasize);
923 		s = splvm();
924 		/* XXX */
925 		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
926 		splx(s);
927 	}
928 }
929 
930 /*
931  * Submit a command to the controller and poll on completion.  Return
932  * non-zero on timeout (but don't check status, as some command types don't
933  * return status).  Must be called with interrupts blocked.
934  */
935 int
936 twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
937 {
938 	int rv;
939 
940 	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
941 		return (rv);
942 
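	/* `timo' is in units of 100ms. */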
943 	for (; timo != 0; timo--) {
944 		twe_poll(sc);
945 		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
946 			break;
947 		DELAY(100000);
948 	}
949 
950 	return (timo == 0);
951 }
952 
953 /*
954  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
955  * the order that they were enqueued and try to submit their command blocks
956  * to the controller for execution.
957  */
958 void
959 twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
960 {
961 	int s;
962 
963 	s = splbio();
964 
965 	if (ccb != NULL)
966 		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
967 
968 	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
969 		if (twe_ccb_submit(sc, ccb))
970 			break;
971 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
972 	}
973 
974 	splx(s);
975 }
976 
977 /*
978  * Submit the command block associated with the specified CCB to the
979  * controller for execution.  Must be called with interrupts blocked.
980  */
981 int
982 twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
983 {
984 	bus_addr_t pa;
985 	int rv;
986 	u_int status;
987 
988 	/* Check to see if we can post a command. */
989 	status = TWE_INL(sc, TWE_REG_STS);
990 	twe_status_check(sc, status);
991 
992 	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
993 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
994 		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
995 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
996 		ccb->ccb_flags |= TWE_CCB_ACTIVE;
997 		pa = sc->sc_cmds_paddr +
998 		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
999 		TWE_OUTL(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
1000 		rv = 0;
1001 	} else
1002 		rv = EBUSY;
1003 
1004 	return (rv);
1005 }
1006