/*	$NetBSD: twe.c,v 1.21 2001/11/13 07:48:49 lukem Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.21 2001/11/13 07:48:49 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>

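/*
 * Register access helpers: all controller registers are 32 bits wide and
 * are reached through the I/O space window mapped from PCI_CBIO below.
 */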
#define	TWE_INL(sc, port) \
    bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, port)
#define	TWE_OUTL(sc, port, val) \
    bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, port, val)

#define	PCI_CBIO	0x10

static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static int	twe_param_get(struct twe_softc *, int, int, size_t,
			      void (*)(struct twe_ccb *, int), void **);
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);

struct cfattach twe_ca = {
	sizeof(struct twe_softc), twe_match, twe_attach
};

struct {
	const u_int	aen;		/* High byte non-zero if w/unit */
	const char	*desc;
} static const twe_aen_names[] = {
	{ 0x0000, "queue empty" },
	{ 0x0001, "soft reset" },
	{ 0x0102, "degraded mirror" },
	{ 0x0003, "controller error" },
	{ 0x0104, "rebuild fail" },
	{ 0x0105, "rebuild done" },
	{ 0x0106, "incompatible unit" },
	{ 0x0107, "init done" },
	{ 0x0108, "unclean shutdown" },
	{ 0x0109, "aport timeout" },
	{ 0x010a, "drive error" },
	{ 0x010b, "rebuild started" },
	{ 0x010c, "init started" },
	{ 0x0015, "table undefined" },
	{ 0x00ff, "aen queue full" },
};
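
/*
 * In each entry above the low byte is the AEN code proper; a non-zero
 * high byte marks AENs that carry a unit number (see twe_aen_handler()).
 */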

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int size, i, rv, rseg;
	struct twe_param *dtp, *ctp;
	bus_dma_segment_t seg;
	struct twe_cmd *tc;
	struct twe_attach_args twea;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	printf(": 3ware Escalade\n");

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

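	/*
	 * The command blocks are allocated as one physically contiguous
	 * region (a single DMA segment) so that one map covers them all;
	 * twe_ccb_submit() derives each block's bus address as an offset
	 * from sc_cmds_paddr.
	 */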
	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	if (ccb == NULL) {
		printf("%s: unable to allocate memory for ccbs\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;

	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, TWE_MAX_XFER,
		    TWE_MAX_SEGS, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0) {
			printf("%s: can't create dmamap, rv = %d\n",
			    sc->sc_dv.dv_xname, rv);
			return;
		}
		/* Save one CCB for parameter retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
		return;
	}

	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	if (twe_reset(sc)) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Find attached units. */
	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, (void **)&dtp);
	if (rv != 0) {
		printf("%s: can't detect attached units (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	/* For each detected unit, collect size and store in an array. */
	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
		/* Unit present? */
		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
			sc->sc_dsize[i] = 0;
			continue;
		}

		rv = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
		    TWE_PARAM_UNITINFO_Capacity, 4, NULL, (void **)&ctp);
		if (rv != 0) {
			printf("%s: error %d fetching capacity for unit %d\n",
			    sc->sc_dv.dv_xname, rv, i);
			continue;
		}

		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
		free(ctp, M_DEVBUF);
		sc->sc_nunits++;
	}
	free(dtp, M_DEVBUF);

	/* Initialise connection with controller and enable interrupts. */
	twe_init_connection(sc);
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* Attach sub-devices. */
	for (i = 0; i < TWE_MAX_UNITS; i++) {
		if (sc->sc_dsize[i] == 0)
			continue;
		twea.twea_unit = i;
		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
	}
}

/*
 * Reset the controller.  Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
	struct twe_param *tp;
	u_int aen, status;
	volatile u_int32_t junk;
	int got, rv;

	/* Issue a soft reset. */
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
		printf("%s: no attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Pull AENs out of the controller; look for a soft reset AEN. */
	for (got = 0;;) {
		rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
		    2, NULL, (void **)&tp);
		if (rv != 0) {
			printf("%s: error %d while draining response queue\n",
			    sc->sc_dv.dv_xname, rv);
			return (-1);
		}
		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
		free(tp, M_DEVBUF);
		if (aen == TWE_AEN_QUEUE_EMPTY)
			break;
		if (aen == TWE_AEN_SOFT_RESET)
			got = 1;
	}
	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = TWE_INL(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = TWE_INL(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		printf("block device at %s", pnp);
	printf(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
	    cf->tweacf_unit != twea->twea_unit)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught, rv;

	sc = arg;
	caught = 0;
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		if ((sc->sc_flags & TWEF_AEN) == 0) {
			rv = twe_param_get(sc, TWE_PARAM_AEN,
			    TWE_PARAM_AEN_UnitCode, 2, twe_aen_handler,
			    NULL);
			if (rv != 0) {
				printf("%s: unable to retrieve AEN (%d)\n",
				    sc->sc_dv.dv_xname, rv);
				TWE_OUTL(sc, TWE_REG_CTL,
				    TWE_CTL_CLEAR_ATTN_INTR);
			} else
				sc->sc_flags |= TWEF_AEN;
		}
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Handle an AEN returned by the controller.
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	const char *str;
	u_int aen;
	int i, hu, rv;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		sc->sc_flags &= ~TWEF_AEN;
		return;
	}

	str = "<unknown>";
	i = 0;
	hu = 0;

	while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
		if (TWE_AEN_CODE(twe_aen_names[i].aen) == TWE_AEN_CODE(aen)) {
			str = twe_aen_names[i].desc;
			hu = (TWE_AEN_UNIT(twe_aen_names[i].aen) != 0);
			break;
		}
		i++;
	}
	printf("%s: AEN 0x%04x (%s) received", sc->sc_dv.dv_xname,
	    TWE_AEN_CODE(aen), str);
	if (hu != 0)
		printf(" for unit %d", TWE_AEN_UNIT(aen));
	printf("\n");

	/*
	 * Chain another retrieval in case interrupts have been
	 * coalesced.
	 */
	rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
	    twe_aen_handler, NULL);
	if (rv != 0)
		printf("%s: unable to retrieve AEN (%d)\n",
		    sc->sc_dv.dv_xname, rv);
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with the generated context when the command has
 * completed.  If no callback is provided, the command will be executed
 * synchronously and a pointer to a buffer containing the returned data
 * will be handed back via `pbuf'.
 *
 * The caller or callback is responsible for freeing the buffer.
 */
static int
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
	      void (*func)(struct twe_ccb *, int), void **pbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	rv = twe_ccb_alloc(sc, &ccb,
	    TWE_CCB_PARAM | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	if (rv != 0)
		return (rv);

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL) {
		twe_ccb_free(sc, ccb);
		return (ENOMEM);
	}
	if (pbuf != NULL)
		*pbuf = tp;

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
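	/*
	 * The value shifted into the top three bits of the opcode field is
	 * later used by twe_ccb_map() (tc_opcode >> 5) to work out where in
	 * the command block the S/G list lives.
	 */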
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		free(tp, M_DEVBUF);
		return (rv);
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
		if (rv != 0)
			free(tp, M_DEVBUF);
	} else {
		twe_ccb_enqueue(sc, ccb);
		rv = 0;
	}

	return (rv);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
		return (rv);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = TWE_INL(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_MAX_QUEUECNT) {
			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: bad completion (not active)\n",
			    sc->sc_dv.dv_xname);
			continue;
		}
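		/*
		 * Flip the flags: TWE_CCB_ACTIVE is known to be set here, so
		 * the XOR clears it and sets TWE_CCB_COMPLETE in one step.
		 */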
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

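	/* `timo' is in seconds; poll the status register every 100ms. */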
	for (timo *= 10; timo != 0; timo--) {
		if ((TWE_INL(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    TWE_STS_EXPECTED_BITS & ~status);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
{
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	int s;

	s = splbio();
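	/*
	 * CCB 0 is never put on the free list (see twe_attach()); it is
	 * reserved for parameter retrieval and handed out here for PARAM
	 * requests.
	 */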
	if ((flags & TWE_CCB_PARAM) != 0)
		ccb = sc->sc_ccbs;
	else {
		/* Allocate a CCB and command block. */
		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
			splx(s);
			return (EAGAIN);
		}
		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB already allocated");
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	/* Initialise some fields and return. */
	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
	*ccbp = ccb;

	return (0);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s, rv;
	void *data;

	/*
	 * The data as a whole must be 512-byte aligned.
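	 * If it is not, the transfer is bounced through a temporary buffer;
	 * twe_ccb_unmap() copies the data back out for data-in transfers
	 * and releases the buffer.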
	 */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splvm();
		/* XXX */
		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, NULL,
		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/*
	 * Map the data buffer into bus space and build the S/G list.
	 */
	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
	     BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0) {
		if (ccb->ccb_abuf != (vaddr_t)0) {
			s = splvm();
			/* XXX */
			uvm_km_free(kmem_map, ccb->ccb_abuf,
			    ccb->ccb_datasize);
			splx(s);
		}
		return (rv);
	}

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
#ifdef DEBUG
	default:
		panic("twe_ccb_map: oops");
#endif
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout (but don't check status, as some command types don't
 * return status).  Must be called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{
	int rv;

	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
		return (rv);

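	/* Busy-wait in 100us steps until the CCB is marked complete. */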
	for (timo *= 1000; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

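	/*
	 * Submit in FIFO order, stopping at the first CCB the controller
	 * refuses (command queue full); twe_poll() re-runs this queue once
	 * commands complete.
	 */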
	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

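	/*
	 * If there is room, post the command by writing the bus address of
	 * its command block to the command queue register.
	 */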
	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		TWE_OUTL(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}