1 /*	$NetBSD: twe.c,v 1.109 2021/04/24 23:36:57 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*-
33  * Copyright (c) 2000 Michael Smith
34  * Copyright (c) 2000 BSDi
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  *
58  * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
59  */
60 
61 /*
62  * Driver for the 3ware Escalade family of RAID controllers.
63  */
64 
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.109 2021/04/24 23:36:57 thorpej Exp $");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/device.h>
72 #include <sys/queue.h>
73 #include <sys/proc.h>
74 #include <sys/buf.h>
75 #include <sys/endian.h>
76 #include <sys/malloc.h>
77 #include <sys/conf.h>
78 #include <sys/disk.h>
79 #include <sys/sysctl.h>
80 #include <sys/syslog.h>
81 #include <sys/kauth.h>
82 #include <sys/module.h>
83 #include <sys/bswap.h>
84 #include <sys/bus.h>
85 
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89 #include <dev/pci/twereg.h>
90 #include <dev/pci/twevar.h>
91 #include <dev/pci/tweio.h>
92 
93 #include "locators.h"
94 #include "ioconf.h"
95 
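/*
 * PCI base address register (BAR 0, configuration space offset 0x10)
 * that maps the controller's I/O register window; passed to
 * pci_mapreg_map() in twe_attach().
 */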
96 #define	PCI_CBIO	0x10
97 
98 static int	twe_aen_get(struct twe_softc *, uint16_t *);
99 static void	twe_aen_handler(struct twe_ccb *, int);
100 static void	twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
101 static uint16_t	twe_aen_dequeue(struct twe_softc *);
102 
103 static void	twe_attach(device_t, device_t, void *);
104 static int	twe_rescan(device_t, const char *, const int *);
105 static int	twe_init_connection(struct twe_softc *);
106 static int	twe_intr(void *);
107 static int	twe_match(device_t, cfdata_t, void *);
108 static int	twe_param_set(struct twe_softc *, int, int, size_t, void *);
109 static void	twe_poll(struct twe_softc *);
110 static int	twe_print(void *, const char *);
111 static int	twe_reset(struct twe_softc *);
112 static int	twe_status_check(struct twe_softc *, u_int);
113 static int	twe_status_wait(struct twe_softc *, u_int, int);
114 static void	twe_describe_controller(struct twe_softc *);
115 static void	twe_clear_pci_abort(struct twe_softc *sc);
116 static void	twe_clear_pci_parity_error(struct twe_softc *sc);
117 
118 static int	twe_add_unit(struct twe_softc *, int);
119 static int	twe_del_unit(struct twe_softc *, int);
121 
122 static inline u_int32_t	twe_inl(struct twe_softc *, int);
123 static inline void twe_outl(struct twe_softc *, int, u_int32_t);
124 
125 extern struct	cfdriver twe_cd;
126 
127 CFATTACH_DECL3_NEW(twe, sizeof(struct twe_softc),
128     twe_match, twe_attach, NULL, NULL, twe_rescan, NULL, 0);
129 
130 /* FreeBSD driver revision for sysctl expected by the 3ware cli */
131 const char twever[] = "1.50.01.002";
132 
133 /*
134  * Tables to convert numeric codes to strings.
135  */
136 const struct twe_code_table twe_table_status[] = {
137 	{ 0x00,	"successful completion" },
138 
139 	/* info */
140 	{ 0x42,	"command in progress" },
141 	{ 0x6c,	"retrying interface CRC error from UDMA command" },
142 
143 	/* warning */
144 	{ 0x81,	"redundant/inconsequential request ignored" },
145 	{ 0x8e,	"failed to write zeroes to LBA 0" },
146 	{ 0x8f,	"failed to profile TwinStor zones" },
147 
148 	/* fatal */
149 	{ 0xc1,	"aborted due to system command or reconfiguration" },
150 	{ 0xc4,	"aborted" },
151 	{ 0xc5,	"access error" },
152 	{ 0xc6,	"access violation" },
153 	{ 0xc7,	"device failure" },	/* high byte may be port # */
154 	{ 0xc8,	"controller error" },
155 	{ 0xc9,	"timed out" },
156 	{ 0xcb,	"invalid unit number" },
157 	{ 0xcf,	"unit not available" },
158 	{ 0xd2,	"undefined opcode" },
159 	{ 0xdb,	"request incompatible with unit" },
160 	{ 0xdc,	"invalid request" },
161 	{ 0xff,	"firmware error, reset requested" },
162 
163 	{ 0,	NULL }
164 };
165 
166 const struct twe_code_table twe_table_unitstate[] = {
167 	{ TWE_PARAM_UNITSTATUS_Normal,		"Normal" },
168 	{ TWE_PARAM_UNITSTATUS_Initialising,	"Initializing" },
169 	{ TWE_PARAM_UNITSTATUS_Degraded,	"Degraded" },
170 	{ TWE_PARAM_UNITSTATUS_Rebuilding,	"Rebuilding" },
171 	{ TWE_PARAM_UNITSTATUS_Verifying,	"Verifying" },
172 	{ TWE_PARAM_UNITSTATUS_Corrupt,		"Corrupt" },
173 	{ TWE_PARAM_UNITSTATUS_Missing,		"Missing" },
174 
175 	{ 0,					NULL }
176 };
177 
178 const struct twe_code_table twe_table_unittype[] = {
179 	/* array descriptor configuration */
180 	{ TWE_AD_CONFIG_RAID0,			"RAID0" },
181 	{ TWE_AD_CONFIG_RAID1,			"RAID1" },
182 	{ TWE_AD_CONFIG_TwinStor,		"TwinStor" },
183 	{ TWE_AD_CONFIG_RAID5,			"RAID5" },
184 	{ TWE_AD_CONFIG_RAID10,			"RAID10" },
185 	{ TWE_UD_CONFIG_JBOD,			"JBOD" },
186 
187 	{ 0,					NULL }
188 };
189 
190 const struct twe_code_table twe_table_stripedepth[] = {
191 	{ TWE_AD_STRIPE_4k,			"4K" },
192 	{ TWE_AD_STRIPE_8k,			"8K" },
193 	{ TWE_AD_STRIPE_16k,			"16K" },
194 	{ TWE_AD_STRIPE_32k,			"32K" },
195 	{ TWE_AD_STRIPE_64k,			"64K" },
196 	{ TWE_AD_STRIPE_128k,			"128K" },
197 	{ TWE_AD_STRIPE_256k,			"256K" },
198 	{ TWE_AD_STRIPE_512k,			"512K" },
199 	{ TWE_AD_STRIPE_1024k,			"1024K" },
200 
201 	{ 0,					NULL }
202 };
203 
204 /*
205  * Asynchronous event notification messages are qualified:
206  *	a - not unit/port specific
207  *	u - unit specific
208  *	p - port specific
209  *
210  * They are further qualified with a severity:
211  *	E - LOG_EMERG
212  *	a - LOG_ALERT
213  *	c - LOG_CRIT
214  *	e - LOG_ERR
215  *	w - LOG_WARNING
216  *	n - LOG_NOTICE
217  *	i - LOG_INFO
218  *	d - LOG_DEBUG
219  *	blank - just use printf
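 *
 * For example, the entry "uE rebuild fail" below is unit-specific ('u')
 * and logged at LOG_EMERG ('E'); twe_aen_enqueue() decodes these two
 * prefix characters and skips past them (str + 3) to find the message
 * text.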
220  */
221 const struct twe_code_table twe_table_aen[] = {
222 	{ 0x00,	"a  queue empty" },
223 	{ 0x01,	"a  soft reset" },
224 	{ 0x02,	"uc degraded mode" },
225 	{ 0x03,	"aa controller error" },
226 	{ 0x04,	"uE rebuild fail" },
227 	{ 0x05,	"un rebuild done" },
228 	{ 0x06,	"ue incomplete unit" },
229 	{ 0x07,	"un initialization done" },
230 	{ 0x08,	"uw unclean shutdown detected" },
231 	{ 0x09,	"pe drive timeout" },
232 	{ 0x0a,	"pc drive error" },
233 	{ 0x0b,	"un rebuild started" },
234 	{ 0x0c,	"un initialization started" },
235 	{ 0x0d,	"ui logical unit deleted" },
236 	{ 0x0f,	"pc SMART threshold exceeded" },
237 	{ 0x15,	"a  table undefined" },	/* XXX: Not in FreeBSD's table */
238 	{ 0x21,	"pe ATA UDMA downgrade" },
239 	{ 0x22,	"pi ATA UDMA upgrade" },
240 	{ 0x23,	"pw sector repair occurred" },
241 	{ 0x24,	"aa SBUF integrity check failure" },
242 	{ 0x25,	"pa lost cached write" },
243 	{ 0x26,	"pa drive ECC error detected" },
244 	{ 0x27,	"pe DCB checksum error" },
245 	{ 0x28,	"pn DCB unsupported version" },
246 	{ 0x29,	"ui verify started" },
247 	{ 0x2a,	"ua verify failed" },
248 	{ 0x2b,	"ui verify complete" },
249 	{ 0x2c,	"pw overwrote bad sector during rebuild" },
250 	{ 0x2d,	"pa encountered bad sector during rebuild" },
251 	{ 0x2e,	"pe replacement drive too small" },
252 	{ 0x2f,	"ue array not previously initialized" },
253 	{ 0x30,	"p  drive not supported" },
254 	{ 0xff,	"a  aen queue full" },
255 
256 	{ 0,	NULL },
257 };
258 
259 const char *
260 twe_describe_code(const struct twe_code_table *table, uint32_t code)
261 {
262 
263 	for (; table->string != NULL; table++) {
264 		if (table->code == code)
265 			return (table->string);
266 	}
267 	return (NULL);
268 }
269 
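/*
 * Register access helpers: twe_inl() issues a read/write barrier before
 * the read so the value reflects all earlier accesses; twe_outl() issues
 * a write barrier after the write so the store is pushed out before any
 * later access.
 */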
270 static inline u_int32_t
271 twe_inl(struct twe_softc *sc, int off)
272 {
273 
274 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
275 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
276 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
277 }
278 
279 static inline void
280 twe_outl(struct twe_softc *sc, int off, u_int32_t val)
281 {
282 
283 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
284 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
285 	    BUS_SPACE_BARRIER_WRITE);
286 }
287 
288 /*
289  * Match a supported board.
290  */
291 static int
292 twe_match(device_t parent, cfdata_t cfdata, void *aux)
293 {
294 	struct pci_attach_args *pa;
295 
296 	pa = aux;
297 
298 	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
299 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
300 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
301 }
302 
303 /*
304  * Attach a supported board.
305  *
306  * XXX This doesn't fail gracefully.
307  */
308 static void
309 twe_attach(device_t parent, device_t self, void *aux)
310 {
311 	struct pci_attach_args *pa;
312 	struct twe_softc *sc;
313 	pci_chipset_tag_t pc;
314 	pci_intr_handle_t ih;
315 	pcireg_t csr;
316 	const char *intrstr;
317 	int s, size, i, rv, rseg;
318 	size_t max_segs, max_xfer;
319 	bus_dma_segment_t seg;
320 	const struct sysctlnode *node;
321 	struct twe_cmd *tc;
322 	struct twe_ccb *ccb;
323 	char intrbuf[PCI_INTRSTR_LEN];
324 
325 	sc = device_private(self);
326 	sc->sc_dev = self;
327 	pa = aux;
328 	pc = pa->pa_pc;
329 	sc->sc_dmat = pa->pa_dmat;
330 	SIMPLEQ_INIT(&sc->sc_ccb_queue);
331 	SLIST_INIT(&sc->sc_ccb_freelist);
332 
333 	aprint_naive(": RAID controller\n");
334 	aprint_normal(": 3ware Escalade\n");
335 
336 
337 	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
338 	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
339 		aprint_error_dev(self, "can't map i/o space\n");
340 		return;
341 	}
342 
343 	/* Enable the device. */
344 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
345 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
346 	    csr | PCI_COMMAND_MASTER_ENABLE);
347 
348 	/* Map and establish the interrupt. */
349 	if (pci_intr_map(pa, &ih)) {
350 		aprint_error_dev(self, "can't map interrupt\n");
351 		return;
352 	}
353 
354 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
355 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, twe_intr, sc,
356 	    device_xname(self));
357 	if (sc->sc_ih == NULL) {
358 		aprint_error_dev(self, "can't establish interrupt%s%s\n",
359 			(intrstr) ? " at " : "",
360 			(intrstr) ? intrstr : "");
361 		return;
362 	}
363 
364 	if (intrstr != NULL)
365 		aprint_normal_dev(self, "interrupting at %s\n", intrstr);
366 
367 	/*
368 	 * Allocate and initialise the command blocks and CCBs.
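	 *
	 * All command blocks live in a single DMA-safe buffer; each CCB
	 * points at its slot in that buffer, and CCB 0 is kept off the
	 * free list so it is always available for AEN retrieval.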
369 	 */
370 	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;
371 
372 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
373 	    &rseg, BUS_DMA_NOWAIT)) != 0) {
374 		aprint_error_dev(self,
375 		    "unable to allocate commands, rv = %d\n", rv);
376 		return;
377 	}
378 
379 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
380 	    (void **)&sc->sc_cmds,
381 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
382 		aprint_error_dev(self,
383 		    "unable to map commands, rv = %d\n", rv);
384 		return;
385 	}
386 
387 	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
388 	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
389 		aprint_error_dev(self,
390 		    "unable to create command DMA map, rv = %d\n", rv);
391 		return;
392 	}
393 
394 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
395 	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
396 		aprint_error_dev(self,
397 		    "unable to load command DMA map, rv = %d\n", rv);
398 		return;
399 	}
400 
401 	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
402 	memset(sc->sc_cmds, 0, size);
403 
404 	tc = (struct twe_cmd *)sc->sc_cmds;
405 	max_segs = twe_get_maxsegs();
406 	max_xfer = twe_get_maxxfer(max_segs);
407 
408 	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
409 	sc->sc_ccbs = ccb;
410 
411 	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
412 		ccb->ccb_cmd = tc;
413 		ccb->ccb_cmdid = i;
414 		ccb->ccb_flags = 0;
415 		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
416 		    max_segs, PAGE_SIZE, 0,
417 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
418 		    &ccb->ccb_dmamap_xfer);
419 		if (rv != 0) {
420 			aprint_error_dev(self,
421 			    "can't create dmamap, rv = %d\n", rv);
422 			return;
423 		}
424 
425 		/* Save the first CCB for AEN retrieval. */
426 		if (i != 0)
427 			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
428 			    ccb_chain.slist);
429 	}
430 
431 	/* Wait for the controller to become ready. */
432 	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
433 		aprint_error_dev(self, "microcontroller not ready\n");
434 		return;
435 	}
436 
437 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);
438 
439 	/* Reset the controller. */
440 	s = splbio();
441 	rv = twe_reset(sc);
442 	splx(s);
443 	if (rv) {
444 		aprint_error_dev(self, "reset failed\n");
445 		return;
446 	}
447 
448 	/* Initialise connection with controller. */
449 	twe_init_connection(sc);
450 
451 	twe_describe_controller(sc);
452 
453 	/* Find and attach RAID array units. */
454 	twe_rescan(self, NULL, NULL);
455 
456 	/* ...and finally, enable interrupts. */
457 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
458 	    TWE_CTL_UNMASK_RESP_INTR |
459 	    TWE_CTL_ENABLE_INTRS);
460 
461 	/* sysctl set-up for 3ware cli */
462 	if (sysctl_createv(NULL, 0, NULL, &node,
463 				0, CTLTYPE_NODE, device_xname(self),
464 				SYSCTL_DESCR("twe driver information"),
465 				NULL, 0, NULL, 0,
466 				CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
467 		aprint_error_dev(self, "could not create %s.%s sysctl node\n",
468 		    "hw", device_xname(self));
469 		return;
470 	}
471 	if ((i = sysctl_createv(NULL, 0, NULL, NULL,
472 				0, CTLTYPE_STRING, "driver_version",
473 				SYSCTL_DESCR("twe0 driver version"),
474 				NULL, 0, __UNCONST(&twever), 0,
475 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
476 				!= 0) {
477 		aprint_error_dev(self,
478 		    "could not create %s.%s.driver_version sysctl\n",
479 		    "hw", device_xname(self));
480 		return;
481 	}
482 }
483 
484 static int
485 twe_rescan(device_t self, const char *ifattr, const int *locs)
486 {
487 	struct twe_softc *sc;
488 	int i;
489 
490 	sc = device_private(self);
491 	sc->sc_nunits = 0;
492 	for (i = 0; i < TWE_MAX_UNITS; i++)
493 		(void) twe_add_unit(sc, i);
494 	return 0;
495 }
496 
497 
498 void
499 twe_register_callbacks(struct twe_softc *sc, int unit,
500     const struct twe_callbacks *tcb)
501 {
502 
503 	sc->sc_units[unit].td_callbacks = tcb;
504 }
505 
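/*
 * Split the available command slots (all CCBs minus the one reserved for
 * AEN retrieval) evenly across the attached array units and notify each
 * unit's driver of its new opening count.
 */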
506 static void
507 twe_recompute_openings(struct twe_softc *sc)
508 {
509 	struct twe_drive *td;
510 	int unit, openings;
511 
512 	if (sc->sc_nunits != 0)
513 		openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
514 	else
515 		openings = 0;
516 	if (openings == sc->sc_openings)
517 		return;
518 	sc->sc_openings = openings;
519 
520 #ifdef TWE_DEBUG
521 	printf("%s: %d array%s, %d openings per array\n",
522 	    device_xname(sc->sc_dev), sc->sc_nunits,
523 	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
524 #endif
525 
526 	for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
527 		td = &sc->sc_units[unit];
528 		if (td->td_dev != NULL)
529 			(*td->td_callbacks->tcb_openings)(td->td_dev,
530 			    sc->sc_openings);
531 	}
532 }
533 
534 static int
535 twe_add_unit(struct twe_softc *sc, int unit)
536 {
537 	struct twe_param *dtp, *atp;
538 	struct twe_array_descriptor *ad;
539 	struct twe_drive *td;
540 	struct twe_attach_args twea;
541 	uint32_t newsize;
542 	int rv;
543 	uint16_t dsize;
544 	uint8_t newtype, newstripe;
545 	int locs[TWECF_NLOCS];
546 
547 	if (unit < 0 || unit >= TWE_MAX_UNITS)
548 		return (EINVAL);
549 
550 	/* Find attached units. */
551 	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
552 	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
553 	if (rv != 0) {
554 		aprint_error_dev(sc->sc_dev,
555 		    "error %d fetching unit summary\n", rv);
556 		return (rv);
557 	}
558 
559 	/* For each detected unit, collect size and store in an array. */
560 	td = &sc->sc_units[unit];
561 
562 	/* Unit present? */
563 	if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
564 		/*
565 		 * XXX Should we check to see if a device has been
566 		 * XXX attached at this index and detach it if it
567 		 * XXX has?  ("rescan" semantics)
568 		 */
569 		rv = 0;
570 		goto out;
571    	}
572 
573 	rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
574 	    TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
575 	if (rv != 0) {
576 		aprint_error_dev(sc->sc_dev,
577 		    "error %d fetching descriptor size for unit %d\n",
578 		    rv, unit);
579 		goto out;
580 	}
581 
582 	rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
583 	    TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
584 	if (rv != 0) {
585 		aprint_error_dev(sc->sc_dev,
586 		    "error %d fetching array descriptor for unit %d\n",
587 		    rv, unit);
588 		goto out;
589 	}
590 
591 	ad = (struct twe_array_descriptor *)atp->tp_data;
592 	newtype = ad->configuration;
593 	newstripe = ad->stripe_size;
594 	free(atp, M_DEVBUF);
595 
596 	rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
597 	    TWE_PARAM_UNITINFO_Capacity, &newsize);
598 	if (rv != 0) {
599 		aprint_error_dev(sc->sc_dev,
600 		    "error %d fetching capacity for unit %d\n",
601 		    rv, unit);
602 		goto out;
603 	}
604 
605 	/*
606 	 * Have a device, so we need to attach it.  If there is currently
607 	 * something sitting at the slot, and the parameters are different,
608 	 * then we detach the old device before attaching the new one.
609 	 */
610 	if (td->td_dev != NULL &&
611 	    td->td_size == newsize &&
612 	    td->td_type == newtype &&
613 	    td->td_stripe == newstripe) {
614 		/* Same as the old device; just keep using it. */
615 		rv = 0;
616 		goto out;
617 	} else if (td->td_dev != NULL) {
618 		/* Detach the old device first. */
619 		(void) config_detach(td->td_dev, DETACH_FORCE);
620 		td->td_dev = NULL;
621 	} else if (td->td_size == 0)
622 		sc->sc_nunits++;
623 
624 	/*
625 	 * Committed to the new array unit; assign its parameters and
626 	 * recompute the number of available command openings.
627 	 */
628 	td->td_size = newsize;
629 	td->td_type = newtype;
630 	td->td_stripe = newstripe;
631 	twe_recompute_openings(sc);
632 
633 	twea.twea_unit = unit;
634 
635 	locs[TWECF_UNIT] = unit;
636 
637 	td->td_dev = config_found(sc->sc_dev, &twea, twe_print,
638 	    CFARG_SUBMATCH, config_stdsubmatch,
639 	    CFARG_LOCATORS, locs,
640 	    CFARG_EOL);
641 
642 	rv = 0;
643  out:
644 	free(dtp, M_DEVBUF);
645 	return (rv);
646 }
647 
648 static int
649 twe_del_unit(struct twe_softc *sc, int unit)
650 {
651 	struct twe_drive *td;
652 
653 	if (unit < 0 || unit >= TWE_MAX_UNITS)
654 		return (EINVAL);
655 
656 	td = &sc->sc_units[unit];
657 	if (td->td_size != 0)
658 		sc->sc_nunits--;
659 	td->td_size = 0;
660 	td->td_type = 0;
661 	td->td_stripe = 0;
662 	if (td->td_dev != NULL) {
663 		(void) config_detach(td->td_dev, DETACH_FORCE);
664 		td->td_dev = NULL;
665 	}
666 	twe_recompute_openings(sc);
667 	return (0);
668 }
669 
670 /*
671  * Reset the controller.
672  * MUST BE CALLED AT splbio()!
673  */
674 static int
675 twe_reset(struct twe_softc *sc)
676 {
677 	uint16_t aen;
678 	u_int status;
679 	int got, rv;
680 
681 	/* Issue a soft reset. */
682 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
683 	    TWE_CTL_CLEAR_HOST_INTR |
684 	    TWE_CTL_CLEAR_ATTN_INTR |
685 	    TWE_CTL_MASK_CMD_INTR |
686 	    TWE_CTL_MASK_RESP_INTR |
687 	    TWE_CTL_CLEAR_ERROR_STS |
688 	    TWE_CTL_DISABLE_INTRS);
689 
690 	/* Wait for attention... */
691 	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) {
692 		aprint_error_dev(sc->sc_dev,
693 		    "timeout waiting for attention interrupt\n");
694 		return (-1);
695 	}
696 
697 	/* ...and ACK it. */
698 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
699 
700 	/*
701 	 * Pull AENs out of the controller; look for a soft reset AEN.
702 	 * Open code this, since we want to detect reset even if the
703 	 * queue for management tools is full.
704 	 *
705 	 * Note that since:
706 	 *	- interrupts are blocked
707 	 *	- we have reset the controller
708 	 *	- we have acknowledged the pending ATTENTION
709 	 * there is no way a pending asynchronous AEN fetch can finish,
710 	 * so clear the flag.
711 	 */
712 	sc->sc_flags &= ~TWEF_AEN;
713 	for (got = 0;;) {
714 		rv = twe_aen_get(sc, &aen);
715 		if (rv != 0)
716 			printf("%s: error %d while draining event queue\n",
717 			    device_xname(sc->sc_dev), rv);
718 		if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
719 			break;
720 		if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
721 			got = 1;
722 		twe_aen_enqueue(sc, aen, 1);
723 	}
724 
725 	if (!got) {
726 		printf("%s: reset not reported\n", device_xname(sc->sc_dev));
727 		return (-1);
728 	}
729 
730 	/* Check controller status. */
731 	status = twe_inl(sc, TWE_REG_STS);
732 	if (twe_status_check(sc, status)) {
733 		printf("%s: controller errors detected\n",
734 		    device_xname(sc->sc_dev));
735 		return (-1);
736 	}
737 
738 	/* Drain the response queue. */
739 	for (;;) {
740 		status = twe_inl(sc, TWE_REG_STS);
741 		if (twe_status_check(sc, status) != 0) {
742 			aprint_error_dev(sc->sc_dev,
743 			    "can't drain response queue\n");
744 			return (-1);
745 		}
746 		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
747 			break;
748 		(void)twe_inl(sc, TWE_REG_RESP_QUEUE);
749 	}
750 
751 	return (0);
752 }
753 
754 /*
755  * Print autoconfiguration message for a sub-device.
756  */
757 static int
758 twe_print(void *aux, const char *pnp)
759 {
760 	struct twe_attach_args *twea;
761 
762 	twea = aux;
763 
764 	if (pnp != NULL)
765 		aprint_normal("block device at %s", pnp);
766 	aprint_normal(" unit %d", twea->twea_unit);
767 	return (UNCONF);
768 }
769 
770 /*
771  * Interrupt service routine.
772  */
773 static int
774 twe_intr(void *arg)
775 {
776 	struct twe_softc *sc;
777 	u_int status;
778 	int caught, rv;
779 
780 	sc = arg;
781 	caught = 0;
782 	status = twe_inl(sc, TWE_REG_STS);
783 	twe_status_check(sc, status);
784 
785 	/* Host interrupts - purpose unknown. */
786 	if ((status & TWE_STS_HOST_INTR) != 0) {
787 #ifdef DEBUG
788 		printf("%s: host interrupt\n", device_xname(sc->sc_dev));
789 #endif
790 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
791 		caught = 1;
792 	}
793 
794 	/*
795 	 * Attention interrupts, signalled when a controller or child device
796 	 * state change has occurred.
797 	 */
798 	if ((status & TWE_STS_ATTN_INTR) != 0) {
799 		rv = twe_aen_get(sc, NULL);
800 		if (rv != 0)
801 			aprint_error_dev(sc->sc_dev,
802 			    "unable to retrieve AEN (%d)\n", rv);
803 		else
804 			twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
805 		caught = 1;
806 	}
807 
808 	/*
809 	 * Command interrupts, signalled when the controller can accept more
810 	 * commands.  We don't use this; instead, we try to submit commands
811 	 * when we receive them, and when other commands have completed.
812 	 * Mask it so we don't get another one.
813 	 */
814 	if ((status & TWE_STS_CMD_INTR) != 0) {
815 #ifdef DEBUG
816 		printf("%s: command interrupt\n", device_xname(sc->sc_dev));
817 #endif
818 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
819 		caught = 1;
820 	}
821 
822 	if ((status & TWE_STS_RESP_INTR) != 0) {
823 		twe_poll(sc);
824 		caught = 1;
825 	}
826 
827 	return (caught);
828 }
829 
830 /*
831  * Fetch an AEN.  Even though this is really like parameter
832  * retrieval, we handle this specially, because we issue this
833  * AEN retrieval command from interrupt context, and thus
834  * reserve a CCB for it to avoid resource shortage.
835  *
836  * XXX There are still potential resource shortages we could
837  * XXX encounter.  Consider pre-allocating all AEN-related
838  * XXX resources.
839  *
840  * MUST BE CALLED AT splbio()!
841  */
842 static int
843 twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
844 {
845 	struct twe_ccb *ccb;
846 	struct twe_cmd *tc;
847 	struct twe_param *tp;
848 	int rv;
849 
850 	/*
851 	 * If we're already retrieving an AEN, just wait; another
852 	 * retrieval will be chained after the current one completes.
853 	 */
854 	if (sc->sc_flags & TWEF_AEN) {
855 		/*
856 		 * It is a fatal software programming error to attempt
857 		 * to fetch an AEN synchronously when an AEN fetch is
858 		 * already pending.
859 		 */
860 		KASSERT(aenp == NULL);
861 		return (0);
862 	}
863 
864 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
865 	if (tp == NULL)
866 		return (ENOMEM);
867 
868 	ccb = twe_ccb_alloc(sc,
869 	    TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
870 	KASSERT(ccb != NULL);
871 
872 	ccb->ccb_data = tp;
873 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
874 	ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
875 	ccb->ccb_tx.tx_context = tp;
876 	ccb->ccb_tx.tx_dv = sc->sc_dev;
877 
878 	tc = ccb->ccb_cmd;
879 	tc->tc_size = 2;
880 	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
881 	tc->tc_unit = 0;
882 	tc->tc_count = htole16(1);
883 
884 	/* Fill in the outbound parameter data. */
885 	tp->tp_table_id = htole16(TWE_PARAM_AEN);
886 	tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
887 	tp->tp_param_size = 2;
888 
889 	/* Map the transfer. */
890 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
891 		twe_ccb_free(sc, ccb);
892 		goto done;
893 	}
894 
895 	/* Enqueue the command and wait. */
896 	if (aenp != NULL) {
897 		rv = twe_ccb_poll(sc, ccb, 5);
898 		twe_ccb_unmap(sc, ccb);
899 		twe_ccb_free(sc, ccb);
900 		if (rv == 0)
901 			*aenp = le16toh(*(uint16_t *)tp->tp_data);
902 		free(tp, M_DEVBUF);
903 	} else {
904 		sc->sc_flags |= TWEF_AEN;
905 		twe_ccb_enqueue(sc, ccb);
906 		rv = 0;
907 	}
908 
909  done:
910 	return (rv);
911 }
912 
913 /*
914  * Handle an AEN returned by the controller.
915  * MUST BE CALLED AT splbio()!
916  */
917 static void
918 twe_aen_handler(struct twe_ccb *ccb, int error)
919 {
920 	struct twe_softc *sc;
921 	struct twe_param *tp;
922 	uint16_t aen;
923 	int rv;
924 
925 	sc = device_private(ccb->ccb_tx.tx_dv);
926 	tp = ccb->ccb_tx.tx_context;
927 	twe_ccb_unmap(sc, ccb);
928 
929 	sc->sc_flags &= ~TWEF_AEN;
930 
931 	if (error) {
932 		aprint_error_dev(sc->sc_dev, "error retrieving AEN\n");
933 		aen = TWE_AEN_QUEUE_EMPTY;
934 	} else
935 		aen = le16toh(*(u_int16_t *)tp->tp_data);
936 	free(tp, M_DEVBUF);
937 	twe_ccb_free(sc, ccb);
938 
939 	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
940 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
941 		return;
942 	}
943 
944 	twe_aen_enqueue(sc, aen, 0);
945 
946 	/*
947 	 * Chain another retrieval in case interrupts have been
948 	 * coalesced.
949 	 */
950 	rv = twe_aen_get(sc, NULL);
951 	if (rv != 0)
952 		aprint_error_dev(sc->sc_dev,
953 		    "unable to retrieve AEN (%d)\n", rv);
954 }
955 
956 static void
957 twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
958 {
959 	const char *str, *msg;
960 	int s, next, nextnext, level;
961 
962 	/*
963 	 * First report the AEN on the console.  Maybe.
964 	 */
965 	if (! quiet) {
966 		str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
967 		if (str == NULL) {
968 			aprint_error_dev(sc->sc_dev,
969 			    "unknown AEN 0x%04x\n", aen);
970 		} else {
971 			msg = str + 3;
972 			switch (str[1]) {
973 			case 'E':	level = LOG_EMERG; break;
974 			case 'a':	level = LOG_ALERT; break;
975 			case 'c':	level = LOG_CRIT; break;
976 			case 'e':	level = LOG_ERR; break;
977 			case 'w':	level = LOG_WARNING; break;
978 			case 'n':	level = LOG_NOTICE; break;
979 			case 'i':	level = LOG_INFO; break;
980 			case 'd':	level = LOG_DEBUG; break;
981 			default:
982 				/* Don't use syslog. */
983 				level = -1;
984 			}
985 
986 			if (level < 0) {
987 				switch (str[0]) {
988 				case 'u':
989 				case 'p':
990 					printf("%s: %s %d: %s\n",
991 					    device_xname(sc->sc_dev),
992 					    str[0] == 'u' ? "unit" : "port",
993 					    TWE_AEN_UNIT(aen), msg);
994 					break;
995 
996 				default:
997 					printf("%s: %s\n",
998 					    device_xname(sc->sc_dev), msg);
999 				}
1000 			} else {
1001 				switch (str[0]) {
1002 				case 'u':
1003 				case 'p':
1004 					log(level, "%s: %s %d: %s\n",
1005 					    device_xname(sc->sc_dev),
1006 					    str[0] == 'u' ? "unit" : "port",
1007 					    TWE_AEN_UNIT(aen), msg);
1008 					break;
1009 
1010 				default:
1011 					log(level, "%s: %s\n",
1012 					    device_xname(sc->sc_dev), msg);
1013 				}
1014 			}
1015 		}
1016 	}
1017 
1018 	/* Now enqueue the AEN for management tools. */
1019 	s = splbio();
1020 
1021 	next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
1022 	nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;
1023 
1024 	/*
1025 	 * If this is the last free slot, then queue up a "queue
1026 	 * full" message.
1027 	 */
1028 	if (nextnext == sc->sc_aen_tail)
1029 		aen = TWE_AEN_QUEUE_FULL;
1030 
1031 	if (next != sc->sc_aen_tail) {
1032 		sc->sc_aen_queue[sc->sc_aen_head] = aen;
1033 		sc->sc_aen_head = next;
1034 	}
1035 
1036 	if (sc->sc_flags & TWEF_AENQ_WAIT) {
1037 		sc->sc_flags &= ~TWEF_AENQ_WAIT;
1038 		wakeup(&sc->sc_aen_queue);
1039 	}
1040 
1041 	splx(s);
1042 }
1043 
1044 /* NOTE: Must be called at splbio(). */
1045 static uint16_t
1046 twe_aen_dequeue(struct twe_softc *sc)
1047 {
1048 	uint16_t aen;
1049 
1050 	if (sc->sc_aen_tail == sc->sc_aen_head)
1051 		aen = TWE_AEN_QUEUE_EMPTY;
1052 	else {
1053 		aen = sc->sc_aen_queue[sc->sc_aen_tail];
1054 		sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
1055 	}
1056 
1057 	return (aen);
1058 }
1059 
1060 /*
1061  * These are short-hand functions that execute TWE_OP_GET_PARAM to
1062  * fetch 1, 2, and 4 byte parameter values, respectively.
1063  */
1064 int
1065 twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
1066     uint8_t *valp)
1067 {
1068 	struct twe_param *tp;
1069 	int rv;
1070 
1071 	rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
1072 	if (rv != 0)
1073 		return (rv);
1074 	*valp = *(uint8_t *)tp->tp_data;
1075 	free(tp, M_DEVBUF);
1076 	return (0);
1077 }
1078 
1079 int
1080 twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
1081     uint16_t *valp)
1082 {
1083 	struct twe_param *tp;
1084 	int rv;
1085 
1086 	rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
1087 	if (rv != 0)
1088 		return (rv);
1089 	*valp = le16toh(*(uint16_t *)tp->tp_data);
1090 	free(tp, M_DEVBUF);
1091 	return (0);
1092 }
1093 
1094 int
1095 twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
1096     uint32_t *valp)
1097 {
1098 	struct twe_param *tp;
1099 	int rv;
1100 
1101 	rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
1102 	if (rv != 0)
1103 		return (rv);
1104 	*valp = le32toh(*(uint32_t *)tp->tp_data);
1105 	free(tp, M_DEVBUF);
1106 	return (0);
1107 }
1108 
1109 /*
1110  * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
1111  * it will be called with generated context when the command has completed.
1112  * If no callback is provided, the command will be executed synchronously
1113  * and a pointer to a buffer containing the returned data is stored in *pbuf.
1114  *
1115  * The caller or callback is responsible for freeing the buffer.
1116  *
1117  * NOTE: We assume we can sleep here to wait for a CCB to become available.
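 *
 * A typical synchronous caller (compare twe_add_unit() above) looks
 * roughly like this:
 *
 *	struct twe_param *tp;
 *
 *	if (twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
 *	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &tp) == 0) {
 *		... examine tp->tp_data ...
 *		free(tp, M_DEVBUF);
 *	}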
1118  */
1119 int
1120 twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
1121 	      void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
1122 {
1123 	struct twe_ccb *ccb;
1124 	struct twe_cmd *tc;
1125 	struct twe_param *tp;
1126 	int rv, s;
1127 
1128 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1129 	if (tp == NULL)
1130 		return ENOMEM;
1131 
1132 	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1133 	KASSERT(ccb != NULL);
1134 
1135 	ccb->ccb_data = tp;
1136 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
1137 	ccb->ccb_tx.tx_handler = func;
1138 	ccb->ccb_tx.tx_context = tp;
1139 	ccb->ccb_tx.tx_dv = sc->sc_dev;
1140 
1141 	tc = ccb->ccb_cmd;
1142 	tc->tc_size = 2;
1143 	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
1144 	tc->tc_unit = 0;
1145 	tc->tc_count = htole16(1);
1146 
1147 	/* Fill in the outbound parameter data. */
1148 	tp->tp_table_id = htole16(table_id);
1149 	tp->tp_param_id = param_id;
1150 	tp->tp_param_size = size;
1151 
1152 	/* Map the transfer. */
1153 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1154 		twe_ccb_free(sc, ccb);
1155 		goto done;
1156 	}
1157 
1158 	/* Submit the command and either wait or let the callback handle it. */
1159 	if (func == NULL) {
1160 		s = splbio();
1161 		rv = twe_ccb_poll(sc, ccb, 5);
1162 		twe_ccb_unmap(sc, ccb);
1163 		twe_ccb_free(sc, ccb);
1164 		splx(s);
1165 	} else {
1166 #ifdef DEBUG
1167 		if (pbuf != NULL)
1168 			panic("both func and pbuf defined");
1169 #endif
1170 		twe_ccb_enqueue(sc, ccb);
1171 		return 0;
1172 	}
1173 
1174 done:
1175 	if (pbuf == NULL || rv != 0)
1176 		free(tp, M_DEVBUF);
1177 	else if (pbuf != NULL && rv == 0)
1178 		*pbuf = tp;
1179 	return rv;
1180 }
1181 
1182 /*
1183  * Execute a TWE_OP_SET_PARAM command.
1184  *
1185  * NOTE: We assume we can sleep here to wait for a CCB to become available.
1186  */
1187 static int
1188 twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
1189 	      void *sbuf)
1190 {
1191 	struct twe_ccb *ccb;
1192 	struct twe_cmd *tc;
1193 	struct twe_param *tp;
1194 	int rv, s;
1195 
1196 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1197 	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1198 	ccb->ccb_data = tp;
1199 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
1200 	ccb->ccb_tx.tx_handler = 0;
1201 	ccb->ccb_tx.tx_context = tp;
1202 	ccb->ccb_tx.tx_dv = sc->sc_dev;
1203 
1204 	tc = ccb->ccb_cmd;
1205 	tc->tc_size = 2;
1206 	tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
1207 	tc->tc_unit = 0;
1208 	tc->tc_count = htole16(1);
1209 
1210 	/* Fill in the outbound parameter data. */
1211 	tp->tp_table_id = htole16(table_id);
1212 	tp->tp_param_id = param_id;
1213 	tp->tp_param_size = size;
1214 	memcpy(tp->tp_data, sbuf, size);
1215 
1216 	/* Map the transfer. */
1217 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1218 		twe_ccb_free(sc, ccb);
1219 		goto done;
1220 	}
1221 
1222 	/* Submit the command and wait. */
1223 	s = splbio();
1224 	rv = twe_ccb_poll(sc, ccb, 5);
1225 	twe_ccb_unmap(sc, ccb);
1226 	twe_ccb_free(sc, ccb);
1227 	splx(s);
1228 done:
1229 	free(tp, M_DEVBUF);
1230 	return (rv);
1231 }
1232 
1233 /*
1234  * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
1235  * Must be called with interrupts blocked.
1236  */
1237 static int
1238 twe_init_connection(struct twe_softc *sc)
1239 {
1240 	struct twe_ccb *ccb;
1241 	struct twe_cmd *tc;
1242 	int rv;
1243 
1244 	if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
1245 		return (EAGAIN);
1246 
1247 	/* Build the command. */
1248 	tc = ccb->ccb_cmd;
1249 	tc->tc_size = 3;
1250 	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
1251 	tc->tc_unit = 0;
1252 	tc->tc_count = htole16(TWE_MAX_CMDS);
1253 	tc->tc_args.init_connection.response_queue_pointer = 0;
1254 
1255 	/* Submit the command for immediate execution. */
1256 	rv = twe_ccb_poll(sc, ccb, 5);
1257 	twe_ccb_free(sc, ccb);
1258 	return (rv);
1259 }
1260 
1261 /*
1262  * Poll the controller for completed commands.  Must be called with
1263  * interrupts blocked.
1264  */
1265 static void
1266 twe_poll(struct twe_softc *sc)
1267 {
1268 	struct twe_ccb *ccb;
1269 	int found;
1270 	u_int status, cmdid;
1271 
1272 	found = 0;
1273 
1274 	for (;;) {
1275 		status = twe_inl(sc, TWE_REG_STS);
1276 		twe_status_check(sc, status);
1277 
1278 		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
1279 			break;
1280 
1281 		found = 1;
1282 		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
1283 		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
1284 		if (cmdid >= TWE_MAX_QUEUECNT) {
1285 			aprint_error_dev(sc->sc_dev, "bad cmdid %d\n", cmdid);
1286 			continue;
1287 		}
1288 
1289 		ccb = sc->sc_ccbs + cmdid;
1290 		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
1291 			printf("%s: CCB for cmdid %d not active\n",
1292 			    device_xname(sc->sc_dev), cmdid);
1293 			continue;
1294 		}
1295 		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;
1296 
1297 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1298 		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1299 		    sizeof(struct twe_cmd),
1300 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1301 
1302 		/* Pass notification to upper layers. */
1303 		if (ccb->ccb_tx.tx_handler != NULL)
1304 			(*ccb->ccb_tx.tx_handler)(ccb,
1305 			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
1306 	}
1307 
1308 	/* If any commands have completed, run the software queue. */
1309 	if (found)
1310 		twe_ccb_enqueue(sc, NULL);
1311 }
1312 
1313 /*
1314  * Wait for `status' to be set in the controller status register.  Return
1315  * zero if found, non-zero if the operation timed out.
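 * The timeout is given in seconds; the status register is polled every
 * 100ms.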
1316  */
1317 static int
1318 twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
1319 {
1320 
1321 	for (timo *= 10; timo != 0; timo--) {
1322 		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
1323 			break;
1324 		delay(100000);
1325 	}
1326 
1327 	return (timo == 0);
1328 }
1329 
1330 /*
1331  * Clear a PCI parity error.
1332  */
1333 static void
1334 twe_clear_pci_parity_error(struct twe_softc *sc)
1335 {
1336 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0,
1337 	    TWE_CTL_CLEAR_PARITY_ERROR);
1338 
1339 	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
1340 }
1341 
1342 
1343 /*
1344  * Clear a PCI abort.
1345  */
1346 static void
1347 twe_clear_pci_abort(struct twe_softc *sc)
1348 {
1349 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT);
1350 
1351 	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
1352 }
1353 
1354 /*
1355  * Complain if the status bits aren't what we expect.
1356  */
1357 static int
1358 twe_status_check(struct twe_softc *sc, u_int status)
1359 {
1360 	int rv;
1361 
1362 	rv = 0;
1363 
1364 	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
1365 		aprint_error_dev(sc->sc_dev, "missing status bits: 0x%08x\n",
1366 		    status & ~TWE_STS_EXPECTED_BITS);
1367 		rv = -1;
1368 	}
1369 
1370 	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
1371 		aprint_error_dev(sc->sc_dev, "unexpected status bits: 0x%08x\n",
1372 		    status & TWE_STS_UNEXPECTED_BITS);
1373 		rv = -1;
1374 		if (status & TWE_STS_PCI_PARITY_ERROR) {
1375 			aprint_error_dev(sc->sc_dev, "PCI parity error: Reseat"
1376 			    " card, move card or buggy device present.\n");
1377 			twe_clear_pci_parity_error(sc);
1378 		}
1379 		if (status & TWE_STS_PCI_ABORT) {
1380 			aprint_error_dev(sc->sc_dev, "PCI abort, clearing.\n");
1381 			twe_clear_pci_abort(sc);
1382 		}
1383 	}
1384 
1385 	return (rv);
1386 }
1387 
1388 /*
1389  * Allocate and initialise a CCB.
1390  */
1391 static inline void
1392 twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
1393 {
1394 	struct twe_cmd *tc;
1395 
1396 	ccb->ccb_tx.tx_handler = NULL;
1397 	ccb->ccb_flags = flags;
1398 	tc = ccb->ccb_cmd;
1399 	tc->tc_status = 0;
1400 	tc->tc_flags = 0;
1401 	tc->tc_cmdid = ccb->ccb_cmdid;
1402 }
1403 
1404 struct twe_ccb *
1405 twe_ccb_alloc(struct twe_softc *sc, int flags)
1406 {
1407 	struct twe_ccb *ccb;
1408 	int s;
1409 
1410 	s = splbio();
1411 	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
1412 		/* Use the reserved CCB. */
1413 		ccb = sc->sc_ccbs;
1414 	} else {
1415 		/* Allocate a CCB and command block. */
1416 		if (__predict_false((ccb =
1417 				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1418 			splx(s);
1419 			return (NULL);
1420 		}
1421 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1422 	}
1423 #ifdef DIAGNOSTIC
1424 	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
1425 		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
1426 	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1427 		panic("twe_ccb_alloc: CCB %ld already allocated",
1428 		    (long)(ccb - sc->sc_ccbs));
1429 	flags |= TWE_CCB_ALLOCED;
1430 #endif
1431 	splx(s);
1432 
1433 	twe_ccb_init(sc, ccb, flags);
1434 	return (ccb);
1435 }
1436 
1437 struct twe_ccb *
1438 twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
1439 {
1440 	struct twe_ccb *ccb;
1441 	int s;
1442 
1443 	KASSERT((flags & TWE_CCB_AEN) == 0);
1444 
1445 	s = splbio();
1446 	while (__predict_false((ccb =
1447 				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1448 		sc->sc_flags |= TWEF_WAIT_CCB;
1449 		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
1450 	}
1451 	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1452 #ifdef DIAGNOSTIC
1453 	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1454 		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
1455 		    (long)(ccb - sc->sc_ccbs));
1456 	flags |= TWE_CCB_ALLOCED;
1457 #endif
1458 	splx(s);
1459 
1460 	twe_ccb_init(sc, ccb, flags);
1461 	return (ccb);
1462 }
1463 
1464 /*
1465  * Free a CCB.
1466  */
1467 void
1468 twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
1469 {
1470 	int s;
1471 
1472 	s = splbio();
1473 	if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
1474 		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
1475 		if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
1476 			sc->sc_flags &= ~TWEF_WAIT_CCB;
1477 			wakeup(&sc->sc_ccb_freelist);
1478 		}
1479 	}
1480 	ccb->ccb_flags = 0;
1481 	splx(s);
1482 }
1483 
1484 /*
1485  * Map the specified CCB's command block and data buffer (if any) into
1486  * controller visible space.  Perform DMA synchronisation.
1487  */
1488 int
1489 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
1490 {
1491 	struct twe_cmd *tc;
1492 	int flags, nsegs, i, s, rv;
1493 	void *data;
1494 
1495 	/*
1496 	 * The data as a whole must be 512-byte aligned.
1497 	 */
1498 	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
1499 		s = splvm();
1500 		/* XXX */
1501 		rv = uvm_km_kmem_alloc(kmem_va_arena,
1502 		    ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT),
1503 		    (vmem_addr_t *)&ccb->ccb_abuf);
1504 		splx(s);
1505 		data = (void *)ccb->ccb_abuf;
1506 		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1507 			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
1508 	} else {
1509 		ccb->ccb_abuf = (vaddr_t)0;
1510 		data = ccb->ccb_data;
1511 	}
1512 
1513 	/*
1514 	 * Map the data buffer into bus space and build the S/G list.
1515 	 */
1516 	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
1517 	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1518 	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
1519 	    BUS_DMA_READ : BUS_DMA_WRITE));
1520 	if (rv != 0) {
1521 		if (ccb->ccb_abuf != (vaddr_t)0) {
1522 			s = splvm();
1523 			/* XXX */
1524 			uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1525 			    ccb->ccb_datasize);
1526 			splx(s);
1527 		}
1528 		return (rv);
1529 	}
1530 
1531 	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
1532 	tc = ccb->ccb_cmd;
1533 	tc->tc_size += 2 * nsegs;
1534 
1535 	/* The location of the S/G list is dependent upon command type. */
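	/*
	 * Commands in this file build tc_opcode as TWE_OP_xxx |
	 * (tc_size << 5), so (tc_opcode >> 5) recovers the base command
	 * size: 2 for parameter commands, 3 for I/O commands.  That value
	 * selects which union member carries the S/G list; anything else
	 * falls through to the default case below.
	 */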
1536 	switch (tc->tc_opcode >> 5) {
1537 	case 2:
1538 		for (i = 0; i < nsegs; i++) {
1539 			tc->tc_args.param.sgl[i].tsg_address =
1540 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1541 			tc->tc_args.param.sgl[i].tsg_length =
1542 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1543 		}
1544 		/* XXX Needed? */
1545 		for (; i < TWE_SG_SIZE; i++) {
1546 			tc->tc_args.param.sgl[i].tsg_address = 0;
1547 			tc->tc_args.param.sgl[i].tsg_length = 0;
1548 		}
1549 		break;
1550 	case 3:
1551 		for (i = 0; i < nsegs; i++) {
1552 			tc->tc_args.io.sgl[i].tsg_address =
1553 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1554 			tc->tc_args.io.sgl[i].tsg_length =
1555 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1556 		}
1557 		/* XXX Needed? */
1558 		for (; i < TWE_SG_SIZE; i++) {
1559 			tc->tc_args.io.sgl[i].tsg_address = 0;
1560 			tc->tc_args.io.sgl[i].tsg_length = 0;
1561 		}
1562 		break;
1563 	default:
1564 		/*
1565 		 * In all likelihood, this is a command passed from
1566 		 * management tools in userspace where no S/G list is
1567 		 * necessary because no data is being passed.
1568 		 */
1569 		break;
1570 	}
1571 
1572 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1573 		flags = BUS_DMASYNC_PREREAD;
1574 	else
1575 		flags = 0;
1576 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1577 		flags |= BUS_DMASYNC_PREWRITE;
1578 
1579 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1580 	    ccb->ccb_datasize, flags);
1581 	return (0);
1582 }
1583 
1584 /*
1585  * Unmap the specified CCB's command block and data buffer (if any) and
1586  * perform DMA synchronisation.
1587  */
1588 void
1589 twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
1590 {
1591 	int flags, s;
1592 
1593 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1594 		flags = BUS_DMASYNC_POSTREAD;
1595 	else
1596 		flags = 0;
1597 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1598 		flags |= BUS_DMASYNC_POSTWRITE;
1599 
1600 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1601 	    ccb->ccb_datasize, flags);
1602 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
1603 
1604 	if (ccb->ccb_abuf != (vaddr_t)0) {
1605 		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1606 			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
1607 			    ccb->ccb_datasize);
1608 		s = splvm();
1609 		/* XXX */
1610 		uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1611 		    ccb->ccb_datasize);
1612 		splx(s);
1613 	}
1614 }
1615 
1616 /*
1617  * Submit a command to the controller and poll on completion.  Return
1618  * non-zero on timeout (but don't check status, as some command types don't
1619  * return status).  Must be called with interrupts blocked.
1620  */
1621 int
1622 twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
1623 {
1624 	int rv;
1625 
1626 	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
1627 		return (rv);
1628 
1629 	for (timo *= 1000; timo != 0; timo--) {
1630 		twe_poll(sc);
1631 		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
1632 			break;
1633 		DELAY(100);
1634 	}
1635 
1636 	return (timo == 0);
1637 }
1638 
1639 /*
1640  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
1641  * the order that they were enqueued and try to submit their command blocks
1642  * to the controller for execution.
1643  */
1644 void
1645 twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
1646 {
1647 	int s;
1648 
1649 	s = splbio();
1650 
1651 	if (ccb != NULL)
1652 		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
1653 
1654 	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
1655 		if (twe_ccb_submit(sc, ccb))
1656 			break;
1657 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
1658 	}
1659 
1660 	splx(s);
1661 }
1662 
1663 /*
1664  * Submit the command block associated with the specified CCB to the
1665  * controller for execution.  Must be called with interrupts blocked.
1666  */
1667 int
1668 twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
1669 {
1670 	bus_addr_t pa;
1671 	int rv;
1672 	u_int status;
1673 
1674 	/* Check to see if we can post a command. */
1675 	status = twe_inl(sc, TWE_REG_STS);
1676 	twe_status_check(sc, status);
1677 
1678 	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
1679 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1680 		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1681 		    sizeof(struct twe_cmd),
1682 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1683 #ifdef DIAGNOSTIC
1684 		if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
1685 			panic("%s: CCB %ld not ALLOCED\n",
1686 			    device_xname(sc->sc_dev), (long)(ccb - sc->sc_ccbs));
1687 #endif
1688 		ccb->ccb_flags |= TWE_CCB_ACTIVE;
1689 		pa = sc->sc_cmds_paddr +
1690 		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
1691 		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
1692 		rv = 0;
1693 	} else
1694 		rv = EBUSY;
1695 
1696 	return (rv);
1697 }
1698 
1699 
1700 /*
1701  * Accept an open operation on the control device.
1702  */
1703 static int
1704 tweopen(dev_t dev, int flag, int mode, struct lwp *l)
1705 {
1706 	struct twe_softc *twe;
1707 
1708 	if ((twe = device_lookup_private(&twe_cd, minor(dev))) == NULL)
1709 		return (ENXIO);
1710 	if ((twe->sc_flags & TWEF_OPEN) != 0)
1711 		return (EBUSY);
1712 
1713 	twe->sc_flags |= TWEF_OPEN;
1714 	return (0);
1715 }
1716 
1717 /*
1718  * Accept the last close on the control device.
1719  */
1720 static int
1721 tweclose(dev_t dev, int flag, int mode,
1722     struct lwp *l)
1723 {
1724 	struct twe_softc *twe;
1725 
1726 	twe = device_lookup_private(&twe_cd, minor(dev));
1727 	twe->sc_flags &= ~TWEF_OPEN;
1728 	return (0);
1729 }
1730 
1731 void
1732 twe_ccb_wait_handler(struct twe_ccb *ccb, int error)
1733 {
1734 
1735 	/* Just wake up the sleeper. */
1736 	wakeup(ccb);
1737 }
1738 
1739 /*
1740  * Handle control operations.
1741  */
1742 static int
1743 tweioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1744 {
1745 	struct twe_softc *twe;
1746 	struct twe_ccb *ccb;
1747 	struct twe_param *param;
1748 	struct twe_usercommand *tu;
1749 	struct twe_paramcommand *tp;
1750 	struct twe_drivecommand *td;
1751 	void *pdata = NULL;
1752 	int s, error = 0;
1753 	u_int8_t cmdid;
1754 
1755 	twe = device_lookup_private(&twe_cd, minor(dev));
1756 	tu = (struct twe_usercommand *)data;
1757 	tp = (struct twe_paramcommand *)data;
1758 	td = (struct twe_drivecommand *)data;
1759 
1760 	/* This is intended to be compatible with the FreeBSD interface. */
1761 	switch (cmd) {
1762 	case TWEIO_COMMAND:
1763 		error = kauth_authorize_device_passthru(l->l_cred, dev,
1764 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
1765 		if (error)
1766 			return (error);
1767 
1768 		/* XXX mutex */
1769 		if (tu->tu_size > 0) {
1770 			/*
1771 			 * XXX Handle > TWE_SECTOR_SIZE?  Let's see if
1772 			 * it's really necessary, first.
1773 			 */
1774 			if (tu->tu_size > TWE_SECTOR_SIZE) {
1775 #ifdef TWE_DEBUG
1776 				printf("%s: TWEIO_COMMAND: tu_size = %zu\n",
1777 				    device_xname(twe->sc_dev), tu->tu_size);
1778 #endif
1779 				return EINVAL;
1780 			}
1781 			pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1782 			error = copyin(tu->tu_data, pdata, tu->tu_size);
1783 			if (error != 0)
1784 				goto done;
1785 			ccb = twe_ccb_alloc_wait(twe,
1786 			    TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1787 			KASSERT(ccb != NULL);
1788 			ccb->ccb_data = pdata;
1789 			ccb->ccb_datasize = TWE_SECTOR_SIZE;
1790 		} else {
1791 			ccb = twe_ccb_alloc_wait(twe, 0);
1792 			KASSERT(ccb != NULL);
1793 		}
1794 
1795 		ccb->ccb_tx.tx_handler = twe_ccb_wait_handler;
1796 		ccb->ccb_tx.tx_context = NULL;
1797 		ccb->ccb_tx.tx_dv = twe->sc_dev;
1798 
1799 		cmdid = ccb->ccb_cmdid;
1800 		memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
1801 		ccb->ccb_cmd->tc_cmdid = cmdid;
1802 
1803 		/* Map the transfer. */
1804 		if ((error = twe_ccb_map(twe, ccb)) != 0) {
1805 			twe_ccb_free(twe, ccb);
1806 			goto done;
1807 		}
1808 
1809 		/* Submit the command and wait up to 1 minute. */
1810 		error = 0;
1811 		twe_ccb_enqueue(twe, ccb);
1812 		s = splbio();
1813 		while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
1814 			if ((error = tsleep(ccb, PRIBIO, "tweioctl",
1815 					    60 * hz)) != 0)
1816 				break;
1817 		splx(s);
1818 
1819 		/* Copy the command back to the ioctl argument. */
1820 		memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
1821 #ifdef TWE_DEBUG
1822 		printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
1823 		    "tc_status = 0x%02x\n", device_xname(twe->sc_dev),
1824 		    tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
1825 #endif
1826 
1827 		s = splbio();
1828 		twe_ccb_free(twe, ccb);
1829 		splx(s);
1830 
1831 		if (tu->tu_size > 0)
1832 			error = copyout(pdata, tu->tu_data, tu->tu_size);
1833 		goto done;
1834 
1835 	case TWEIO_STATS:
1836 		return (ENOENT);
1837 
1838 	case TWEIO_AEN_POLL:
1839 		s = splbio();
1840 		*(u_int *)data = twe_aen_dequeue(twe);
1841 		splx(s);
1842 		return (0);
1843 
1844 	case TWEIO_AEN_WAIT:
1845 		s = splbio();
1846 		while ((*(u_int *)data =
1847 		    twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
1848 			twe->sc_flags |= TWEF_AENQ_WAIT;
1849 			error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
1850 			    "tweaen", 0);
1851 			if (error == EINTR) {
1852 				splx(s);
1853 				return (error);
1854 			}
1855 		}
1856 		splx(s);
1857 		return (0);
1858 
1859 	case TWEIO_GET_PARAM:
1860 		error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
1861 		    tp->tp_size, 0, &param);
1862 		if (error != 0)
1863 			return (error);
1864 		if (param->tp_param_size > tp->tp_size) {
1865 			error = EFAULT;
1866 			goto done;
1867 		}
1868 		error = copyout(param->tp_data, tp->tp_data,
1869 		    param->tp_param_size);
1870 		free(param, M_DEVBUF);
1871 		goto done;
1872 
1873 	case TWEIO_SET_PARAM:
1874 		pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
1875 		if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
1876 			goto done;
1877 		error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
1878 		    tp->tp_size, pdata);
1879 		goto done;
1880 
1881 	case TWEIO_RESET:
1882 		s = splbio();
1883 		twe_reset(twe);
1884 		splx(s);
1885 		return (0);
1886 
1887 	case TWEIO_ADD_UNIT:
1888 		/* XXX mutex */
1889 		return (twe_add_unit(twe, td->td_unit));
1890 
1891 	case TWEIO_DEL_UNIT:
1892 		/* XXX mutex */
1893 		return (twe_del_unit(twe, td->td_unit));
1894 
1895 	default:
1896 		return EINVAL;
1897 	}
1898 done:
1899 	if (pdata)
1900 		free(pdata, M_DEVBUF);
1901 	return error;
1902 }
1903 
1904 const struct cdevsw twe_cdevsw = {
1905 	.d_open = tweopen,
1906 	.d_close = tweclose,
1907 	.d_read = noread,
1908 	.d_write = nowrite,
1909 	.d_ioctl = tweioctl,
1910 	.d_stop = nostop,
1911 	.d_tty = notty,
1912 	.d_poll = nopoll,
1913 	.d_mmap = nommap,
1914 	.d_kqfilter = nokqfilter,
1915 	.d_discard = nodiscard,
1916 	.d_flag = D_OTHER
1917 };
1918 
1919 /*
1920  * Print some information about the controller
1921  */
1922 static void
1923 twe_describe_controller(struct twe_softc *sc)
1924 {
1925 	struct twe_param *p[6];
1926 	int i, rv = 0;
1927 	uint32_t dsize;
1928 	uint8_t ports;
1929 
1930 	ports = 0;
1931 
1932 	/* get the port count */
1933 	rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
1934 		TWE_PARAM_CONTROLLER_PortCount, &ports);
1935 
1936 	/* get version strings */
1937 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
1938 		16, NULL, &p[0]);
1939 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
1940 		16, NULL, &p[1]);
1941 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
1942 		16, NULL, &p[2]);
1943 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
1944 		8, NULL, &p[3]);
1945 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
1946 		8, NULL, &p[4]);
1947 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
1948 		8, NULL, &p[5]);
1949 
1950 	if (rv) {
1951 		/* some error occurred */
1952 		aprint_error_dev(sc->sc_dev,
1953 		    "failed to fetch version information\n");
1954 		return;
1955 	}
1956 
1957 	aprint_normal_dev(sc->sc_dev, "%d ports, Firmware %.16s, BIOS %.16s\n",
1958 	    ports, p[1]->tp_data, p[2]->tp_data);
1959 
1960 	aprint_verbose_dev(sc->sc_dev,
1961 	    "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
1962 	    p[0]->tp_data, p[3]->tp_data,
1963 	    p[4]->tp_data, p[5]->tp_data);
1964 
1965 	free(p[0], M_DEVBUF);
1966 	free(p[1], M_DEVBUF);
1967 	free(p[2], M_DEVBUF);
1968 	free(p[3], M_DEVBUF);
1969 	free(p[4], M_DEVBUF);
1970 	free(p[5], M_DEVBUF);
1971 
1972 	rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
1973 	    TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
1974 	if (rv) {
1975 		aprint_error_dev(sc->sc_dev,
1976 		    "failed to get drive status summary\n");
1977 		return;
1978 	}
1979 	for (i = 0; i < ports; i++) {
1980 		if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
1981 			continue;
1982 		rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
1983 		    TWE_PARAM_DRIVEINFO_Size, &dsize);
1984 		if (rv) {
1985 			aprint_error_dev(sc->sc_dev,
1986 			    "unable to get drive size for port %d\n", i);
1987 			continue;
1988 		}
1989 		rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
1990 		    TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
1991 		if (rv) {
1992 			aprint_error_dev(sc->sc_dev,
1993 			    "unable to get drive model for port %d\n", i);
1994 			continue;
1995 		}
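		/*
		 * The drive size parameter appears to be in 512-byte
		 * sectors; dividing by 2048 converts it to megabytes.
		 */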
1996 		aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
1997 		    i, p[1]->tp_data, dsize / 2048);
1998 		free(p[1], M_DEVBUF);
1999 	}
2000 	free(p[0], M_DEVBUF);
2001 }
2002 
2003 MODULE(MODULE_CLASS_DRIVER, twe, "pci");
2004 
2005 #ifdef _MODULE
2006 #include "ioconf.c"
2007 #endif
2008 
2009 static int
2010 twe_modcmd(modcmd_t cmd, void *opaque)
2011 {
2012 	int error = 0;
2013 
2014 #ifdef _MODULE
2015 	switch (cmd) {
2016 	case MODULE_CMD_INIT:
2017 		error = config_init_component(cfdriver_ioconf_twe,
2018 		    cfattach_ioconf_twe, cfdata_ioconf_twe);
2019 		break;
2020 	case MODULE_CMD_FINI:
2021 		error = config_fini_component(cfdriver_ioconf_twe,
2022 		    cfattach_ioconf_twe, cfdata_ioconf_twe);
2023 		break;
2024 	default:
2025 		error = ENOTTY;
2026 		break;
2027 	}
2028 #endif
2029 
2030 	return error;
2031 }
2032