1 /*	$NetBSD: twe.c,v 1.108 2019/11/10 21:16:36 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*-
33  * Copyright (c) 2000 Michael Smith
34  * Copyright (c) 2000 BSDi
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  *
58  * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
59  */
60 
61 /*
62  * Driver for the 3ware Escalade family of RAID controllers.
63  */
64 
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.108 2019/11/10 21:16:36 chs Exp $");
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/kernel.h>
71 #include <sys/device.h>
72 #include <sys/queue.h>
73 #include <sys/proc.h>
74 #include <sys/buf.h>
75 #include <sys/endian.h>
76 #include <sys/malloc.h>
77 #include <sys/conf.h>
78 #include <sys/disk.h>
79 #include <sys/sysctl.h>
80 #include <sys/syslog.h>
81 #include <sys/kauth.h>
82 #include <sys/module.h>
83 #include <sys/bswap.h>
84 #include <sys/bus.h>
85 
86 #include <dev/pci/pcireg.h>
87 #include <dev/pci/pcivar.h>
88 #include <dev/pci/pcidevs.h>
89 #include <dev/pci/twereg.h>
90 #include <dev/pci/twevar.h>
91 #include <dev/pci/tweio.h>
92 
93 #include "locators.h"
94 #include "ioconf.h"
95 
96 #define	PCI_CBIO	0x10
97 
98 static int	twe_aen_get(struct twe_softc *, uint16_t *);
99 static void	twe_aen_handler(struct twe_ccb *, int);
100 static void	twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
101 static uint16_t	twe_aen_dequeue(struct twe_softc *);
102 
103 static void	twe_attach(device_t, device_t, void *);
104 static int	twe_rescan(device_t, const char *, const int *);
105 static int	twe_init_connection(struct twe_softc *);
106 static int	twe_intr(void *);
107 static int	twe_match(device_t, cfdata_t, void *);
108 static int	twe_param_set(struct twe_softc *, int, int, size_t, void *);
109 static void	twe_poll(struct twe_softc *);
110 static int	twe_print(void *, const char *);
111 static int	twe_reset(struct twe_softc *);
112 static int	twe_status_check(struct twe_softc *, u_int);
113 static int	twe_status_wait(struct twe_softc *, u_int, int);
114 static void	twe_describe_controller(struct twe_softc *);
115 static void	twe_clear_pci_abort(struct twe_softc *sc);
116 static void	twe_clear_pci_parity_error(struct twe_softc *sc);
117 
118 static int	twe_add_unit(struct twe_softc *, int);
119 static int	twe_del_unit(struct twe_softc *, int);
121 
122 static inline u_int32_t	twe_inl(struct twe_softc *, int);
123 static inline void twe_outl(struct twe_softc *, int, u_int32_t);
124 
125 extern struct	cfdriver twe_cd;
126 
127 CFATTACH_DECL3_NEW(twe, sizeof(struct twe_softc),
128     twe_match, twe_attach, NULL, NULL, twe_rescan, NULL, 0);
129 
130 /* FreeBSD driver revision for sysctl expected by the 3ware cli */
131 const char twever[] = "1.50.01.002";
132 
133 /*
134  * Tables to convert numeric codes to strings.
135  */
136 const struct twe_code_table twe_table_status[] = {
137 	{ 0x00,	"successful completion" },
138 
139 	/* info */
140 	{ 0x42,	"command in progress" },
141 	{ 0x6c,	"retrying interface CRC error from UDMA command" },
142 
143 	/* warning */
144 	{ 0x81,	"redundant/inconsequential request ignored" },
145 	{ 0x8e,	"failed to write zeroes to LBA 0" },
146 	{ 0x8f,	"failed to profile TwinStor zones" },
147 
148 	/* fatal */
149 	{ 0xc1,	"aborted due to system command or reconfiguration" },
150 	{ 0xc4,	"aborted" },
151 	{ 0xc5,	"access error" },
152 	{ 0xc6,	"access violation" },
153 	{ 0xc7,	"device failure" },	/* high byte may be port # */
154 	{ 0xc8,	"controller error" },
155 	{ 0xc9,	"timed out" },
156 	{ 0xcb,	"invalid unit number" },
157 	{ 0xcf,	"unit not available" },
158 	{ 0xd2,	"undefined opcode" },
159 	{ 0xdb,	"request incompatible with unit" },
160 	{ 0xdc,	"invalid request" },
161 	{ 0xff,	"firmware error, reset requested" },
162 
163 	{ 0,	NULL }
164 };
165 
166 const struct twe_code_table twe_table_unitstate[] = {
167 	{ TWE_PARAM_UNITSTATUS_Normal,		"Normal" },
168 	{ TWE_PARAM_UNITSTATUS_Initialising,	"Initializing" },
169 	{ TWE_PARAM_UNITSTATUS_Degraded,	"Degraded" },
170 	{ TWE_PARAM_UNITSTATUS_Rebuilding,	"Rebuilding" },
171 	{ TWE_PARAM_UNITSTATUS_Verifying,	"Verifying" },
172 	{ TWE_PARAM_UNITSTATUS_Corrupt,		"Corrupt" },
173 	{ TWE_PARAM_UNITSTATUS_Missing,		"Missing" },
174 
175 	{ 0,					NULL }
176 };
177 
178 const struct twe_code_table twe_table_unittype[] = {
179 	/* array descriptor configuration */
180 	{ TWE_AD_CONFIG_RAID0,			"RAID0" },
181 	{ TWE_AD_CONFIG_RAID1,			"RAID1" },
182 	{ TWE_AD_CONFIG_TwinStor,		"TwinStor" },
183 	{ TWE_AD_CONFIG_RAID5,			"RAID5" },
184 	{ TWE_AD_CONFIG_RAID10,			"RAID10" },
185 	{ TWE_UD_CONFIG_JBOD,			"JBOD" },
186 
187 	{ 0,					NULL }
188 };
189 
190 const struct twe_code_table twe_table_stripedepth[] = {
191 	{ TWE_AD_STRIPE_4k,			"4K" },
192 	{ TWE_AD_STRIPE_8k,			"8K" },
193 	{ TWE_AD_STRIPE_16k,			"16K" },
194 	{ TWE_AD_STRIPE_32k,			"32K" },
195 	{ TWE_AD_STRIPE_64k,			"64K" },
196 	{ TWE_AD_STRIPE_128k,			"128K" },
197 	{ TWE_AD_STRIPE_256k,			"256K" },
198 	{ TWE_AD_STRIPE_512k,			"512K" },
199 	{ TWE_AD_STRIPE_1024k,			"1024K" },
200 
201 	{ 0,					NULL }
202 };
203 
204 /*
205  * Asynchronous event notification messages are qualified:
206  *	a - not unit/port specific
207  *	u - unit specific
208  *	p - port specific
209  *
210  * They are further qualified with a severity:
211  *	E - LOG_EMERG
212  *	a - LOG_ALERT
213  *	c - LOG_CRIT
214  *	e - LOG_ERR
215  *	w - LOG_WARNING
216  *	n - LOG_NOTICE
217  *	i - LOG_INFO
218  *	d - LOG_DEBUG
219  *	blank - just use printf
220  */
221 const struct twe_code_table twe_table_aen[] = {
222 	{ 0x00,	"a  queue empty" },
223 	{ 0x01,	"a  soft reset" },
224 	{ 0x02,	"uc degraded mode" },
225 	{ 0x03,	"aa controller error" },
226 	{ 0x04,	"uE rebuild fail" },
227 	{ 0x05,	"un rebuild done" },
228 	{ 0x06,	"ue incomplete unit" },
229 	{ 0x07,	"un initialization done" },
230 	{ 0x08,	"uw unclean shutdown detected" },
231 	{ 0x09,	"pe drive timeout" },
232 	{ 0x0a,	"pc drive error" },
233 	{ 0x0b,	"un rebuild started" },
234 	{ 0x0c,	"un initialization started" },
235 	{ 0x0d,	"ui logical unit deleted" },
236 	{ 0x0f,	"pc SMART threshold exceeded" },
237 	{ 0x15,	"a  table undefined" },	/* XXX: Not in FreeBSD's table */
238 	{ 0x21,	"pe ATA UDMA downgrade" },
239 	{ 0x22,	"pi ATA UDMA upgrade" },
240 	{ 0x23,	"pw sector repair occurred" },
241 	{ 0x24,	"aa SBUF integrity check failure" },
242 	{ 0x25,	"pa lost cached write" },
243 	{ 0x26,	"pa drive ECC error detected" },
244 	{ 0x27,	"pe DCB checksum error" },
245 	{ 0x28,	"pn DCB unsupported version" },
246 	{ 0x29,	"ui verify started" },
247 	{ 0x2a,	"ua verify failed" },
248 	{ 0x2b,	"ui verify complete" },
249 	{ 0x2c,	"pw overwrote bad sector during rebuild" },
250 	{ 0x2d,	"pa encountered bad sector during rebuild" },
251 	{ 0x2e,	"pe replacement drive too small" },
252 	{ 0x2f,	"ue array not previously initialized" },
253 	{ 0x30,	"p  drive not supported" },
254 	{ 0xff,	"a  aen queue full" },
255 
256 	{ 0,	NULL },
257 };
258 
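/*
 * Translate a numeric code to its descriptive string using one of the
 * tables above.  Returns NULL if the code is not listed.
 */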
259 const char *
260 twe_describe_code(const struct twe_code_table *table, uint32_t code)
261 {
262 
263 	for (; table->string != NULL; table++) {
264 		if (table->code == code)
265 			return (table->string);
266 	}
267 	return (NULL);
268 }
269 
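/*
 * Register access helpers: reads and writes of the controller's mapped
 * I/O space, with bus space barriers to keep the accesses ordered.
 */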
270 static inline u_int32_t
271 twe_inl(struct twe_softc *sc, int off)
272 {
273 
274 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
275 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
276 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
277 }
278 
279 static inline void
280 twe_outl(struct twe_softc *sc, int off, u_int32_t val)
281 {
282 
283 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
284 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
285 	    BUS_SPACE_BARRIER_WRITE);
286 }
287 
288 /*
289  * Match a supported board.
290  */
291 static int
292 twe_match(device_t parent, cfdata_t cfdata, void *aux)
293 {
294 	struct pci_attach_args *pa;
295 
296 	pa = aux;
297 
298 	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
299 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
300 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
301 }
302 
303 /*
304  * Attach a supported board.
305  *
306  * XXX This doesn't fail gracefully.
307  */
308 static void
309 twe_attach(device_t parent, device_t self, void *aux)
310 {
311 	struct pci_attach_args *pa;
312 	struct twe_softc *sc;
313 	pci_chipset_tag_t pc;
314 	pci_intr_handle_t ih;
315 	pcireg_t csr;
316 	const char *intrstr;
317 	int s, size, i, rv, rseg;
318 	size_t max_segs, max_xfer;
319 	bus_dma_segment_t seg;
320 	const struct sysctlnode *node;
321 	struct twe_cmd *tc;
322 	struct twe_ccb *ccb;
323 	char intrbuf[PCI_INTRSTR_LEN];
324 
325 	sc = device_private(self);
326 	sc->sc_dev = self;
327 	pa = aux;
328 	pc = pa->pa_pc;
329 	sc->sc_dmat = pa->pa_dmat;
330 	SIMPLEQ_INIT(&sc->sc_ccb_queue);
331 	SLIST_INIT(&sc->sc_ccb_freelist);
332 
333 	aprint_naive(": RAID controller\n");
334 	aprint_normal(": 3ware Escalade\n");
335 
337 	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
338 	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
339 		aprint_error_dev(self, "can't map i/o space\n");
340 		return;
341 	}
342 
343 	/* Enable the device. */
344 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
345 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
346 	    csr | PCI_COMMAND_MASTER_ENABLE);
347 
348 	/* Map and establish the interrupt. */
349 	if (pci_intr_map(pa, &ih)) {
350 		aprint_error_dev(self, "can't map interrupt\n");
351 		return;
352 	}
353 
354 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
355 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, twe_intr, sc,
356 	    device_xname(self));
357 	if (sc->sc_ih == NULL) {
358 		aprint_error_dev(self, "can't establish interrupt%s%s\n",
359 			(intrstr) ? " at " : "",
360 			(intrstr) ? intrstr : "");
361 		return;
362 	}
363 
364 	if (intrstr != NULL)
365 		aprint_normal_dev(self, "interrupting at %s\n", intrstr);
366 
367 	/*
368 	 * Allocate and initialise the command blocks and CCBs.
369 	 */
370 	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;
371 
372 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
373 	    &rseg, BUS_DMA_NOWAIT)) != 0) {
374 		aprint_error_dev(self,
375 		    "unable to allocate commands, rv = %d\n", rv);
376 		return;
377 	}
378 
379 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
380 	    (void **)&sc->sc_cmds,
381 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
382 		aprint_error_dev(self,
383 		    "unable to map commands, rv = %d\n", rv);
384 		return;
385 	}
386 
387 	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
388 	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
389 		aprint_error_dev(self,
390 		    "unable to create command DMA map, rv = %d\n", rv);
391 		return;
392 	}
393 
394 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
395 	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
396 		aprint_error_dev(self,
397 		    "unable to load command DMA map, rv = %d\n", rv);
398 		return;
399 	}
400 
401 	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
402 	memset(sc->sc_cmds, 0, size);
403 
404 	tc = (struct twe_cmd *)sc->sc_cmds;
405 	max_segs = twe_get_maxsegs();
406 	max_xfer = twe_get_maxxfer(max_segs);
407 
408 	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
409 	sc->sc_ccbs = ccb;
410 
411 	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
412 		ccb->ccb_cmd = tc;
413 		ccb->ccb_cmdid = i;
414 		ccb->ccb_flags = 0;
415 		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
416 		    max_segs, PAGE_SIZE, 0,
417 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
418 		    &ccb->ccb_dmamap_xfer);
419 		if (rv != 0) {
420 			aprint_error_dev(self,
421 			    "can't create dmamap, rv = %d\n", rv);
422 			return;
423 		}
424 
425 		/* Save the first CCB for AEN retrieval. */
426 		if (i != 0)
427 			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
428 			    ccb_chain.slist);
429 	}
430 
431 	/* Wait for the controller to become ready. */
432 	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
433 		aprint_error_dev(self, "microcontroller not ready\n");
434 		return;
435 	}
436 
437 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);
438 
439 	/* Reset the controller. */
440 	s = splbio();
441 	rv = twe_reset(sc);
442 	splx(s);
443 	if (rv) {
444 		aprint_error_dev(self, "reset failed\n");
445 		return;
446 	}
447 
448 	/* Initialise connection with controller. */
449 	twe_init_connection(sc);
450 
451 	twe_describe_controller(sc);
452 
453 	/* Find and attach RAID array units. */
454 	twe_rescan(self, "twe", 0);
455 
456 	/* ...and finally, enable interrupts. */
457 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
458 	    TWE_CTL_UNMASK_RESP_INTR |
459 	    TWE_CTL_ENABLE_INTRS);
460 
461 	/* sysctl set-up for 3ware cli */
462 	if (sysctl_createv(NULL, 0, NULL, &node,
463 				0, CTLTYPE_NODE, device_xname(self),
464 				SYSCTL_DESCR("twe driver information"),
465 				NULL, 0, NULL, 0,
466 				CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
467 		aprint_error_dev(self, "could not create %s.%s sysctl node\n",
468 		    "hw", device_xname(self));
469 		return;
470 	}
471 	if ((i = sysctl_createv(NULL, 0, NULL, NULL,
472 				0, CTLTYPE_STRING, "driver_version",
473 				SYSCTL_DESCR("twe0 driver version"),
474 				NULL, 0, __UNCONST(&twever), 0,
475 				CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
476 				!= 0) {
477 		aprint_error_dev(self,
478 		    "could not create %s.%s.driver_version sysctl\n",
479 		    "hw", device_xname(self));
480 		return;
481 	}
482 }
483 
484 static int
485 twe_rescan(device_t self, const char *attr, const int *flags)
486 {
487 	struct twe_softc *sc;
488 	int i;
489 
490 	sc = device_private(self);
491 	sc->sc_nunits = 0;
492 	for (i = 0; i < TWE_MAX_UNITS; i++)
493 		(void) twe_add_unit(sc, i);
494 	return 0;
495 }
496 
497 
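/*
 * Register the callback vector supplied by the child device attached to
 * a unit; this file uses it to pass the per-unit command openings down.
 */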
498 void
499 twe_register_callbacks(struct twe_softc *sc, int unit,
500     const struct twe_callbacks *tcb)
501 {
502 
503 	sc->sc_units[unit].td_callbacks = tcb;
504 }
505 
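/*
 * Recompute the number of command openings available to each configured
 * unit and pass the new value to the attached child devices.
 */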
506 static void
507 twe_recompute_openings(struct twe_softc *sc)
508 {
509 	struct twe_drive *td;
510 	int unit, openings;
511 
512 	if (sc->sc_nunits != 0)
513 		openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
514 	else
515 		openings = 0;
516 	if (openings == sc->sc_openings)
517 		return;
518 	sc->sc_openings = openings;
519 
520 #ifdef TWE_DEBUG
521 	printf("%s: %d array%s, %d openings per array\n",
522 	    device_xname(sc->sc_dev), sc->sc_nunits,
523 	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
524 #endif
525 
526 	for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
527 		td = &sc->sc_units[unit];
528 		if (td->td_dev != NULL)
529 			(*td->td_callbacks->tcb_openings)(td->td_dev,
530 			    sc->sc_openings);
531 	}
532 }
533 
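/*
 * Query the controller about the given unit and, if it is on-line,
 * attach (or re-attach) a child device for it.
 */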
534 static int
535 twe_add_unit(struct twe_softc *sc, int unit)
536 {
537 	struct twe_param *dtp, *atp;
538 	struct twe_array_descriptor *ad;
539 	struct twe_drive *td;
540 	struct twe_attach_args twea;
541 	uint32_t newsize;
542 	int rv;
543 	uint16_t dsize;
544 	uint8_t newtype, newstripe;
545 	int locs[TWECF_NLOCS];
546 
547 	if (unit < 0 || unit >= TWE_MAX_UNITS)
548 		return (EINVAL);
549 
550 	/* Find attached units. */
551 	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
552 	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
553 	if (rv != 0) {
554 		aprint_error_dev(sc->sc_dev,
555 		    "error %d fetching unit summary\n", rv);
556 		return (rv);
557 	}
558 
559 	/* Locate the bookkeeping structure for this unit. */
560 	td = &sc->sc_units[unit];
561 
562 	/* Unit present? */
563 	if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
564 		/*
565 		 * XXX Should we check to see if a device has been
566 		 * XXX attached at this index and detach it if it
567 		 * XXX has?  ("rescan" semantics)
568 		 */
569 		rv = 0;
570 		goto out;
571    	}
572 
573 	rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
574 	    TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
575 	if (rv != 0) {
576 		aprint_error_dev(sc->sc_dev,
577 		    "error %d fetching descriptor size for unit %d\n",
578 		    rv, unit);
579 		goto out;
580 	}
581 
582 	rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
583 	    TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
584 	if (rv != 0) {
585 		aprint_error_dev(sc->sc_dev,
586 		    "error %d fetching array descriptor for unit %d\n",
587 		    rv, unit);
588 		goto out;
589 	}
590 
591 	ad = (struct twe_array_descriptor *)atp->tp_data;
592 	newtype = ad->configuration;
593 	newstripe = ad->stripe_size;
594 	free(atp, M_DEVBUF);
595 
596 	rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
597 	    TWE_PARAM_UNITINFO_Capacity, &newsize);
598 	if (rv != 0) {
599 		aprint_error_dev(sc->sc_dev,
600 		    "error %d fetching capacity for unit %d\n",
601 		    rv, unit);
602 		goto out;
603 	}
604 
605 	/*
606 	 * Have a device, so we need to attach it.  If there is currently
607 	 * something sitting at the slot, and the parameters are different,
608 	 * then we detach the old device before attaching the new one.
609 	 */
610 	if (td->td_dev != NULL &&
611 	    td->td_size == newsize &&
612 	    td->td_type == newtype &&
613 	    td->td_stripe == newstripe) {
614 		/* Same as the old device; just keep using it. */
615 		rv = 0;
616 		goto out;
617 	} else if (td->td_dev != NULL) {
618 		/* Detach the old device first. */
619 		(void) config_detach(td->td_dev, DETACH_FORCE);
620 		td->td_dev = NULL;
621 	} else if (td->td_size == 0)
622 		sc->sc_nunits++;
623 
624 	/*
625 	 * Committed to the new array unit; assign its parameters and
626 	 * recompute the number of available command openings.
627 	 */
628 	td->td_size = newsize;
629 	td->td_type = newtype;
630 	td->td_stripe = newstripe;
631 	twe_recompute_openings(sc);
632 
633 	twea.twea_unit = unit;
634 
635 	locs[TWECF_UNIT] = unit;
636 
637 	td->td_dev = config_found_sm_loc(sc->sc_dev, "twe", locs, &twea,
638 					 twe_print, config_stdsubmatch);
639 
640 	rv = 0;
641  out:
642 	free(dtp, M_DEVBUF);
643 	return (rv);
644 }
645 
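/*
 * Detach the child device for the given unit (if any) and clear the
 * unit's bookkeeping state.
 */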
646 static int
647 twe_del_unit(struct twe_softc *sc, int unit)
648 {
649 	struct twe_drive *td;
650 
651 	if (unit < 0 || unit >= TWE_MAX_UNITS)
652 		return (EINVAL);
653 
654 	td = &sc->sc_units[unit];
655 	if (td->td_size != 0)
656 		sc->sc_nunits--;
657 	td->td_size = 0;
658 	td->td_type = 0;
659 	td->td_stripe = 0;
660 	if (td->td_dev != NULL) {
661 		(void) config_detach(td->td_dev, DETACH_FORCE);
662 		td->td_dev = NULL;
663 	}
664 	twe_recompute_openings(sc);
665 	return (0);
666 }
667 
668 /*
669  * Reset the controller.
670  * MUST BE CALLED AT splbio()!
671  */
672 static int
673 twe_reset(struct twe_softc *sc)
674 {
675 	uint16_t aen;
676 	u_int status;
677 	int got, rv;
678 
679 	/* Issue a soft reset. */
680 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
681 	    TWE_CTL_CLEAR_HOST_INTR |
682 	    TWE_CTL_CLEAR_ATTN_INTR |
683 	    TWE_CTL_MASK_CMD_INTR |
684 	    TWE_CTL_MASK_RESP_INTR |
685 	    TWE_CTL_CLEAR_ERROR_STS |
686 	    TWE_CTL_DISABLE_INTRS);
687 
688 	/* Wait for attention... */
689 	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) {
690 		aprint_error_dev(sc->sc_dev,
691 		    "timeout waiting for attention interrupt\n");
692 		return (-1);
693 	}
694 
695 	/* ...and ACK it. */
696 	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
697 
698 	/*
699 	 * Pull AENs out of the controller; look for a soft reset AEN.
700 	 * Open code this, since we want to detect reset even if the
701 	 * queue for management tools is full.
702 	 *
703 	 * Note that since:
704 	 *	- interrupts are blocked
705 	 *	- we have reset the controller
706 	 *	- acknowledged the pending ATTENTION
707 	 * that there is no way a pending asynchronous AEN fetch would
708 	 * there is no way a pending asynchronous AEN fetch would
709 	 */
710 	sc->sc_flags &= ~TWEF_AEN;
711 	for (got = 0;;) {
712 		rv = twe_aen_get(sc, &aen);
713 		if (rv != 0) {
714 			printf("%s: error %d while draining event queue\n",
715 			    device_xname(sc->sc_dev), rv);
			break;		/* aen was not filled in */
		}
716 		if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
717 			break;
718 		if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
719 			got = 1;
720 		twe_aen_enqueue(sc, aen, 1);
721 	}
722 
723 	if (!got) {
724 		printf("%s: reset not reported\n", device_xname(sc->sc_dev));
725 		return (-1);
726 	}
727 
728 	/* Check controller status. */
729 	status = twe_inl(sc, TWE_REG_STS);
730 	if (twe_status_check(sc, status)) {
731 		printf("%s: controller errors detected\n",
732 		    device_xname(sc->sc_dev));
733 		return (-1);
734 	}
735 
736 	/* Drain the response queue. */
737 	for (;;) {
738 		status = twe_inl(sc, TWE_REG_STS);
739 		if (twe_status_check(sc, status) != 0) {
740 			aprint_error_dev(sc->sc_dev,
741 			    "can't drain response queue\n");
742 			return (-1);
743 		}
744 		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
745 			break;
746 		(void)twe_inl(sc, TWE_REG_RESP_QUEUE);
747 	}
748 
749 	return (0);
750 }
751 
752 /*
753  * Print autoconfiguration message for a sub-device.
754  */
755 static int
756 twe_print(void *aux, const char *pnp)
757 {
758 	struct twe_attach_args *twea;
759 
760 	twea = aux;
761 
762 	if (pnp != NULL)
763 		aprint_normal("block device at %s", pnp);
764 	aprint_normal(" unit %d", twea->twea_unit);
765 	return (UNCONF);
766 }
767 
768 /*
769  * Interrupt service routine.
770  */
771 static int
772 twe_intr(void *arg)
773 {
774 	struct twe_softc *sc;
775 	u_int status;
776 	int caught, rv;
777 
778 	sc = arg;
779 	caught = 0;
780 	status = twe_inl(sc, TWE_REG_STS);
781 	twe_status_check(sc, status);
782 
783 	/* Host interrupts - purpose unknown. */
784 	if ((status & TWE_STS_HOST_INTR) != 0) {
785 #ifdef DEBUG
786 		printf("%s: host interrupt\n", device_xname(sc->sc_dev));
787 #endif
788 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
789 		caught = 1;
790 	}
791 
792 	/*
793 	 * Attention interrupts, signalled when a controller or child device
794 	 * state change has occurred.
795 	 */
796 	if ((status & TWE_STS_ATTN_INTR) != 0) {
797 		rv = twe_aen_get(sc, NULL);
798 		if (rv != 0)
799 			aprint_error_dev(sc->sc_dev,
800 			    "unable to retrieve AEN (%d)\n", rv);
801 		else
802 			twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
803 		caught = 1;
804 	}
805 
806 	/*
807 	 * Command interrupts, signalled when the controller can accept more
808 	 * commands.  We don't use this; instead, we try to submit commands
809 	 * when we receive them, and when other commands have completed.
810 	 * Mask it so we don't get another one.
811 	 */
812 	if ((status & TWE_STS_CMD_INTR) != 0) {
813 #ifdef DEBUG
814 		printf("%s: command interrupt\n", device_xname(sc->sc_dev));
815 #endif
816 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
817 		caught = 1;
818 	}
819 
820 	if ((status & TWE_STS_RESP_INTR) != 0) {
821 		twe_poll(sc);
822 		caught = 1;
823 	}
824 
825 	return (caught);
826 }
827 
828 /*
829  * Fetch an AEN.  Even though this is really like parameter
830  * retrieval, we handle this specially, because we issue this
831  * AEN retrieval command from interrupt context, and thus
832  * reserve a CCB for it to avoid resource shortage.
833  *
834  * XXX There are still potential resource shortages we could
835  * XXX encounter.  Consider pre-allocating all AEN-related
836  * XXX resources.
837  *
838  * MUST BE CALLED AT splbio()!
839  */
840 static int
841 twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
842 {
843 	struct twe_ccb *ccb;
844 	struct twe_cmd *tc;
845 	struct twe_param *tp;
846 	int rv;
847 
848 	/*
849 	 * If we're already retrieving an AEN, just wait; another
850 	 * retrieval will be chained after the current one completes.
851 	 */
852 	if (sc->sc_flags & TWEF_AEN) {
853 		/*
854 		 * It is a fatal software programming error to attempt
855 		 * to fetch an AEN synchronously when an AEN fetch is
856 		 * already pending.
857 		 */
858 		KASSERT(aenp == NULL);
859 		return (0);
860 	}
861 
862 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
863 	if (tp == NULL)
864 		return (ENOMEM);
865 
866 	ccb = twe_ccb_alloc(sc,
867 	    TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
868 	KASSERT(ccb != NULL);
869 
870 	ccb->ccb_data = tp;
871 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
872 	ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
873 	ccb->ccb_tx.tx_context = tp;
874 	ccb->ccb_tx.tx_dv = sc->sc_dev;
875 
876 	tc = ccb->ccb_cmd;
877 	tc->tc_size = 2;
878 	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
879 	tc->tc_unit = 0;
880 	tc->tc_count = htole16(1);
881 
882 	/* Fill in the outbound parameter data. */
883 	tp->tp_table_id = htole16(TWE_PARAM_AEN);
884 	tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
885 	tp->tp_param_size = 2;
886 
887 	/* Map the transfer. */
888 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
889 		twe_ccb_free(sc, ccb);
890 		goto done;
891 	}
892 
893 	/* Enqueue the command and wait. */
894 	if (aenp != NULL) {
895 		rv = twe_ccb_poll(sc, ccb, 5);
896 		twe_ccb_unmap(sc, ccb);
897 		twe_ccb_free(sc, ccb);
898 		if (rv == 0)
899 			*aenp = le16toh(*(uint16_t *)tp->tp_data);
900 		free(tp, M_DEVBUF);
901 	} else {
902 		sc->sc_flags |= TWEF_AEN;
903 		twe_ccb_enqueue(sc, ccb);
904 		rv = 0;
905 	}
906 
907  done:
908 	return (rv);
909 }
910 
911 /*
912  * Handle an AEN returned by the controller.
913  * MUST BE CALLED AT splbio()!
914  */
915 static void
916 twe_aen_handler(struct twe_ccb *ccb, int error)
917 {
918 	struct twe_softc *sc;
919 	struct twe_param *tp;
920 	uint16_t aen;
921 	int rv;
922 
923 	sc = device_private(ccb->ccb_tx.tx_dv);
924 	tp = ccb->ccb_tx.tx_context;
925 	twe_ccb_unmap(sc, ccb);
926 
927 	sc->sc_flags &= ~TWEF_AEN;
928 
929 	if (error) {
930 		aprint_error_dev(sc->sc_dev, "error retrieving AEN\n");
931 		aen = TWE_AEN_QUEUE_EMPTY;
932 	} else
933 		aen = le16toh(*(u_int16_t *)tp->tp_data);
934 	free(tp, M_DEVBUF);
935 	twe_ccb_free(sc, ccb);
936 
937 	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
938 		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
939 		return;
940 	}
941 
942 	twe_aen_enqueue(sc, aen, 0);
943 
944 	/*
945 	 * Chain another retrieval in case interrupts have been
946 	 * coalesced.
947 	 */
948 	rv = twe_aen_get(sc, NULL);
949 	if (rv != 0)
950 		aprint_error_dev(sc->sc_dev,
951 		    "unable to retrieve AEN (%d)\n", rv);
952 }
953 
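/*
 * Report an AEN on the console/syslog (unless `quiet') and append it to
 * the queue drained by management tools via TWEIO_AEN_POLL/TWEIO_AEN_WAIT.
 */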
954 static void
955 twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
956 {
957 	const char *str, *msg;
958 	int s, next, nextnext, level;
959 
960 	/*
961 	 * First report the AEN on the console.  Maybe.
962 	 */
963 	if (! quiet) {
964 		str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
965 		if (str == NULL) {
966 			aprint_error_dev(sc->sc_dev,
967 			    "unknown AEN 0x%04x\n", aen);
968 		} else {
969 			msg = str + 3;
970 			switch (str[1]) {
971 			case 'E':	level = LOG_EMERG; break;
972 			case 'a':	level = LOG_ALERT; break;
973 			case 'c':	level = LOG_CRIT; break;
974 			case 'e':	level = LOG_ERR; break;
975 			case 'w':	level = LOG_WARNING; break;
976 			case 'n':	level = LOG_NOTICE; break;
977 			case 'i':	level = LOG_INFO; break;
978 			case 'd':	level = LOG_DEBUG; break;
979 			default:
980 				/* Don't use syslog. */
981 				level = -1;
982 			}
983 
984 			if (level < 0) {
985 				switch (str[0]) {
986 				case 'u':
987 				case 'p':
988 					printf("%s: %s %d: %s\n",
989 					    device_xname(sc->sc_dev),
990 					    str[0] == 'u' ? "unit" : "port",
991 					    TWE_AEN_UNIT(aen), msg);
992 					break;
993 
994 				default:
995 					printf("%s: %s\n",
996 					    device_xname(sc->sc_dev), msg);
997 				}
998 			} else {
999 				switch (str[0]) {
1000 				case 'u':
1001 				case 'p':
1002 					log(level, "%s: %s %d: %s\n",
1003 					    device_xname(sc->sc_dev),
1004 					    str[0] == 'u' ? "unit" : "port",
1005 					    TWE_AEN_UNIT(aen), msg);
1006 					break;
1007 
1008 				default:
1009 					log(level, "%s: %s\n",
1010 					    device_xname(sc->sc_dev), msg);
1011 				}
1012 			}
1013 		}
1014 	}
1015 
1016 	/* Now enqueue the AEN for management tools. */
1017 	s = splbio();
1018 
1019 	next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
1020 	nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;
1021 
1022 	/*
1023 	 * If this is the last free slot, then queue up a "queue
1024 	 * full" message.
1025 	 */
1026 	if (nextnext == sc->sc_aen_tail)
1027 		aen = TWE_AEN_QUEUE_FULL;
1028 
1029 	if (next != sc->sc_aen_tail) {
1030 		sc->sc_aen_queue[sc->sc_aen_head] = aen;
1031 		sc->sc_aen_head = next;
1032 	}
1033 
1034 	if (sc->sc_flags & TWEF_AENQ_WAIT) {
1035 		sc->sc_flags &= ~TWEF_AENQ_WAIT;
1036 		wakeup(&sc->sc_aen_queue);
1037 	}
1038 
1039 	splx(s);
1040 }
1041 
1042 /* NOTE: Must be called at splbio(). */
1043 static uint16_t
1044 twe_aen_dequeue(struct twe_softc *sc)
1045 {
1046 	uint16_t aen;
1047 
1048 	if (sc->sc_aen_tail == sc->sc_aen_head)
1049 		aen = TWE_AEN_QUEUE_EMPTY;
1050 	else {
1051 		aen = sc->sc_aen_queue[sc->sc_aen_tail];
1052 		sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
1053 	}
1054 
1055 	return (aen);
1056 }
1057 
1058 /*
1059  * These are short-hand functions that execute TWE_OP_GET_PARAM to
1060  * fetch 1, 2, and 4 byte parameter values, respectively.
1061  */
1062 int
1063 twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
1064     uint8_t *valp)
1065 {
1066 	struct twe_param *tp;
1067 	int rv;
1068 
1069 	rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
1070 	if (rv != 0)
1071 		return (rv);
1072 	*valp = *(uint8_t *)tp->tp_data;
1073 	free(tp, M_DEVBUF);
1074 	return (0);
1075 }
1076 
1077 int
1078 twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
1079     uint16_t *valp)
1080 {
1081 	struct twe_param *tp;
1082 	int rv;
1083 
1084 	rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
1085 	if (rv != 0)
1086 		return (rv);
1087 	*valp = le16toh(*(uint16_t *)tp->tp_data);
1088 	free(tp, M_DEVBUF);
1089 	return (0);
1090 }
1091 
1092 int
1093 twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
1094     uint32_t *valp)
1095 {
1096 	struct twe_param *tp;
1097 	int rv;
1098 
1099 	rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
1100 	if (rv != 0)
1101 		return (rv);
1102 	*valp = le32toh(*(uint32_t *)tp->tp_data);
1103 	free(tp, M_DEVBUF);
1104 	return (0);
1105 }
1106 
1107 /*
1108  * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
1109  * it will be called with generated context when the command has completed.
1110  * If no callback is provided, the command is executed synchronously
1111  * and a pointer to a buffer containing the returned data is stored in *pbuf.
1112  *
1113  * The caller or callback is responsible for freeing the buffer.
1114  *
1115  * NOTE: We assume we can sleep here to wait for a CCB to become available.
1116  */
1117 int
1118 twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
1119 	      void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
1120 {
1121 	struct twe_ccb *ccb;
1122 	struct twe_cmd *tc;
1123 	struct twe_param *tp;
1124 	int rv, s;
1125 
1126 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1127 	if (tp == NULL)
1128 		return ENOMEM;
1129 
1130 	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1131 	KASSERT(ccb != NULL);
1132 
1133 	ccb->ccb_data = tp;
1134 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
1135 	ccb->ccb_tx.tx_handler = func;
1136 	ccb->ccb_tx.tx_context = tp;
1137 	ccb->ccb_tx.tx_dv = sc->sc_dev;
1138 
1139 	tc = ccb->ccb_cmd;
1140 	tc->tc_size = 2;
1141 	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
1142 	tc->tc_unit = 0;
1143 	tc->tc_count = htole16(1);
1144 
1145 	/* Fill in the outbound parameter data. */
1146 	tp->tp_table_id = htole16(table_id);
1147 	tp->tp_param_id = param_id;
1148 	tp->tp_param_size = size;
1149 
1150 	/* Map the transfer. */
1151 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1152 		twe_ccb_free(sc, ccb);
1153 		goto done;
1154 	}
1155 
1156 	/* Submit the command and either wait or let the callback handle it. */
1157 	if (func == NULL) {
1158 		s = splbio();
1159 		rv = twe_ccb_poll(sc, ccb, 5);
1160 		twe_ccb_unmap(sc, ccb);
1161 		twe_ccb_free(sc, ccb);
1162 		splx(s);
1163 	} else {
1164 #ifdef DEBUG
1165 		if (pbuf != NULL)
1166 			panic("both func and pbuf defined");
1167 #endif
1168 		twe_ccb_enqueue(sc, ccb);
1169 		return 0;
1170 	}
1171 
1172 done:
1173 	if (pbuf == NULL || rv != 0)
1174 		free(tp, M_DEVBUF);
1175 	else
1176 		*pbuf = tp;
1177 	return rv;
1178 }
1179 
1180 /*
1181  * Execute a TWE_OP_SET_PARAM command.
1182  *
1183  * NOTE: We assume we can sleep here to wait for a CCB to become available.
1184  */
1185 static int
1186 twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
1187 	      void *sbuf)
1188 {
1189 	struct twe_ccb *ccb;
1190 	struct twe_cmd *tc;
1191 	struct twe_param *tp;
1192 	int rv, s;
1193 
1194 	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1195 	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1196 	ccb->ccb_data = tp;
1197 	ccb->ccb_datasize = TWE_SECTOR_SIZE;
1198 	ccb->ccb_tx.tx_handler = NULL;
1199 	ccb->ccb_tx.tx_context = tp;
1200 	ccb->ccb_tx.tx_dv = sc->sc_dev;
1201 
1202 	tc = ccb->ccb_cmd;
1203 	tc->tc_size = 2;
1204 	tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
1205 	tc->tc_unit = 0;
1206 	tc->tc_count = htole16(1);
1207 
1208 	/* Fill in the outbound parameter data. */
1209 	tp->tp_table_id = htole16(table_id);
1210 	tp->tp_param_id = param_id;
1211 	tp->tp_param_size = size;
1212 	memcpy(tp->tp_data, sbuf, size);
1213 
1214 	/* Map the transfer. */
1215 	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
1216 		twe_ccb_free(sc, ccb);
1217 		goto done;
1218 	}
1219 
1220 	/* Submit the command and wait. */
1221 	s = splbio();
1222 	rv = twe_ccb_poll(sc, ccb, 5);
1223 	twe_ccb_unmap(sc, ccb);
1224 	twe_ccb_free(sc, ccb);
1225 	splx(s);
1226 done:
1227 	free(tp, M_DEVBUF);
1228 	return (rv);
1229 }
1230 
1231 /*
1232  * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
1233  * Must be called with interrupts blocked.
1234  */
1235 static int
1236 twe_init_connection(struct twe_softc *sc)
1237 {
1238 	struct twe_ccb *ccb;
1239 	struct twe_cmd *tc;
1240 	int rv;
1241 
1242 	if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
1243 		return (EAGAIN);
1244 
1245 	/* Build the command. */
1246 	tc = ccb->ccb_cmd;
1247 	tc->tc_size = 3;
1248 	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
1249 	tc->tc_unit = 0;
1250 	tc->tc_count = htole16(TWE_MAX_CMDS);
1251 	tc->tc_args.init_connection.response_queue_pointer = 0;
1252 
1253 	/* Submit the command for immediate execution. */
1254 	rv = twe_ccb_poll(sc, ccb, 5);
1255 	twe_ccb_free(sc, ccb);
1256 	return (rv);
1257 }
1258 
1259 /*
1260  * Poll the controller for completed commands.  Must be called with
1261  * interrupts blocked.
1262  */
1263 static void
1264 twe_poll(struct twe_softc *sc)
1265 {
1266 	struct twe_ccb *ccb;
1267 	int found;
1268 	u_int status, cmdid;
1269 
1270 	found = 0;
1271 
1272 	for (;;) {
1273 		status = twe_inl(sc, TWE_REG_STS);
1274 		twe_status_check(sc, status);
1275 
1276 		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
1277 			break;
1278 
1279 		found = 1;
1280 		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
1281 		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
1282 		if (cmdid >= TWE_MAX_QUEUECNT) {
1283 			aprint_error_dev(sc->sc_dev, "bad cmdid %d\n", cmdid);
1284 			continue;
1285 		}
1286 
1287 		ccb = sc->sc_ccbs + cmdid;
1288 		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
1289 			printf("%s: CCB for cmdid %d not active\n",
1290 			    device_xname(sc->sc_dev), cmdid);
1291 			continue;
1292 		}
1293 		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;
1294 
1295 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1296 		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1297 		    sizeof(struct twe_cmd),
1298 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1299 
1300 		/* Pass notification to upper layers. */
1301 		if (ccb->ccb_tx.tx_handler != NULL)
1302 			(*ccb->ccb_tx.tx_handler)(ccb,
1303 			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
1304 	}
1305 
1306 	/* If any commands have completed, run the software queue. */
1307 	if (found)
1308 		twe_ccb_enqueue(sc, NULL);
1309 }
1310 
1311 /*
1312  * Wait for `status' to be set in the controller status register.  Return
1313  * zero if found, non-zero if the operation timed out.
1314  */
1315 static int
1316 twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
1317 {
1318 
1319 	for (timo *= 10; timo != 0; timo--) {
1320 		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
1321 			break;
1322 		delay(100000);
1323 	}
1324 
1325 	return (timo == 0);
1326 }
1327 
1328 /*
1329  * Clear a PCI parity error.
1330  */
1331 static void
1332 twe_clear_pci_parity_error(struct twe_softc *sc)
1333 {
1334 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0,
1335 	    TWE_CTL_CLEAR_PARITY_ERROR);
1336 
1337 	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
1338 }
1339 
1340 
1341 /*
1342  * Clear a PCI abort.
1343  */
1344 static void
1345 twe_clear_pci_abort(struct twe_softc *sc)
1346 {
1347 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT);
1348 
1349 	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
1350 }
1351 
1352 /*
1353  * Complain if the status bits aren't what we expect.
1354  */
1355 static int
1356 twe_status_check(struct twe_softc *sc, u_int status)
1357 {
1358 	int rv;
1359 
1360 	rv = 0;
1361 
1362 	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
1363 		aprint_error_dev(sc->sc_dev, "missing status bits: 0x%08x\n",
1364 		    TWE_STS_EXPECTED_BITS & ~status);
1365 		rv = -1;
1366 	}
1367 
1368 	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
1369 		aprint_error_dev(sc->sc_dev, "unexpected status bits: 0x%08x\n",
1370 		    status & TWE_STS_UNEXPECTED_BITS);
1371 		rv = -1;
1372 		if (status & TWE_STS_PCI_PARITY_ERROR) {
1373 			aprint_error_dev(sc->sc_dev, "PCI parity error: Reseat"
1374 			    " card, move card or buggy device present.\n");
1375 			twe_clear_pci_parity_error(sc);
1376 		}
1377 		if (status & TWE_STS_PCI_ABORT) {
1378 			aprint_error_dev(sc->sc_dev, "PCI abort, clearing.\n");
1379 			twe_clear_pci_abort(sc);
1380 		}
1381 	}
1382 
1383 	return (rv);
1384 }
1385 
1386 /*
1387  * Initialise a CCB that has just been allocated.
1388  */
1389 static inline void
1390 twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
1391 {
1392 	struct twe_cmd *tc;
1393 
1394 	ccb->ccb_tx.tx_handler = NULL;
1395 	ccb->ccb_flags = flags;
1396 	tc = ccb->ccb_cmd;
1397 	tc->tc_status = 0;
1398 	tc->tc_flags = 0;
1399 	tc->tc_cmdid = ccb->ccb_cmdid;
1400 }
1401 
1402 struct twe_ccb *
1403 twe_ccb_alloc(struct twe_softc *sc, int flags)
1404 {
1405 	struct twe_ccb *ccb;
1406 	int s;
1407 
1408 	s = splbio();
1409 	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
1410 		/* Use the reserved CCB. */
1411 		ccb = sc->sc_ccbs;
1412 	} else {
1413 		/* Allocate a CCB and command block. */
1414 		if (__predict_false((ccb =
1415 				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1416 			splx(s);
1417 			return (NULL);
1418 		}
1419 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1420 	}
1421 #ifdef DIAGNOSTIC
1422 	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
1423 		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
1424 	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1425 		panic("twe_ccb_alloc: CCB %ld already allocated",
1426 		    (long)(ccb - sc->sc_ccbs));
1427 	flags |= TWE_CCB_ALLOCED;
1428 #endif
1429 	splx(s);
1430 
1431 	twe_ccb_init(sc, ccb, flags);
1432 	return (ccb);
1433 }
1434 
1435 struct twe_ccb *
1436 twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
1437 {
1438 	struct twe_ccb *ccb;
1439 	int s;
1440 
1441 	KASSERT((flags & TWE_CCB_AEN) == 0);
1442 
1443 	s = splbio();
1444 	while (__predict_false((ccb =
1445 				SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
1446 		sc->sc_flags |= TWEF_WAIT_CCB;
1447 		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
1448 	}
1449 	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
1450 #ifdef DIAGNOSTIC
1451 	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
1452 		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
1453 		    (long)(ccb - sc->sc_ccbs));
1454 	flags |= TWE_CCB_ALLOCED;
1455 #endif
1456 	splx(s);
1457 
1458 	twe_ccb_init(sc, ccb, flags);
1459 	return (ccb);
1460 }
1461 
1462 /*
1463  * Free a CCB.
1464  */
1465 void
1466 twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
1467 {
1468 	int s;
1469 
1470 	s = splbio();
1471 	if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
1472 		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
1473 		if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
1474 			sc->sc_flags &= ~TWEF_WAIT_CCB;
1475 			wakeup(&sc->sc_ccb_freelist);
1476 		}
1477 	}
1478 	ccb->ccb_flags = 0;
1479 	splx(s);
1480 }
1481 
1482 /*
1483  * Map the specified CCB's command block and data buffer (if any) into
1484  * controller visible space.  Perform DMA synchronisation.
1485  */
1486 int
1487 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
1488 {
1489 	struct twe_cmd *tc;
1490 	int flags, nsegs, i, s, rv;
1491 	void *data;
1492 
1493 	/*
1494 	 * The data as a whole must be 512-byte aligned.
1495 	 */
1496 	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
1497 		s = splvm();
1498 		/* XXX */
1499 		rv = uvm_km_kmem_alloc(kmem_va_arena,
1500 		    ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT),
1501 		    (vmem_addr_t *)&ccb->ccb_abuf);
1502 		splx(s);
1503 		data = (void *)ccb->ccb_abuf;
1504 		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1505 			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
1506 	} else {
1507 		ccb->ccb_abuf = (vaddr_t)0;
1508 		data = ccb->ccb_data;
1509 	}
1510 
1511 	/*
1512 	 * Map the data buffer into bus space and build the S/G list.
1513 	 */
1514 	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
1515 	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1516 	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
1517 	    BUS_DMA_READ : BUS_DMA_WRITE));
1518 	if (rv != 0) {
1519 		if (ccb->ccb_abuf != (vaddr_t)0) {
1520 			s = splvm();
1521 			/* XXX */
1522 			uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1523 			    ccb->ccb_datasize);
1524 			splx(s);
1525 		}
1526 		return (rv);
1527 	}
1528 
1529 	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
1530 	tc = ccb->ccb_cmd;
1531 	tc->tc_size += 2 * nsegs;
1532 
1533 	/* The location of the S/G list is dependent upon command type. */
1534 	switch (tc->tc_opcode >> 5) {
1535 	case 2:
1536 		for (i = 0; i < nsegs; i++) {
1537 			tc->tc_args.param.sgl[i].tsg_address =
1538 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1539 			tc->tc_args.param.sgl[i].tsg_length =
1540 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1541 		}
1542 		/* XXX Needed? */
1543 		for (; i < TWE_SG_SIZE; i++) {
1544 			tc->tc_args.param.sgl[i].tsg_address = 0;
1545 			tc->tc_args.param.sgl[i].tsg_length = 0;
1546 		}
1547 		break;
1548 	case 3:
1549 		for (i = 0; i < nsegs; i++) {
1550 			tc->tc_args.io.sgl[i].tsg_address =
1551 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
1552 			tc->tc_args.io.sgl[i].tsg_length =
1553 			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
1554 		}
1555 		/* XXX Needed? */
1556 		for (; i < TWE_SG_SIZE; i++) {
1557 			tc->tc_args.io.sgl[i].tsg_address = 0;
1558 			tc->tc_args.io.sgl[i].tsg_length = 0;
1559 		}
1560 		break;
1561 	default:
1562 		/*
1563 		 * In all likelihood, this is a command passed from
1564 		 * management tools in userspace where no S/G list is
1565 		 * necessary because no data is being passed.
1566 		 */
1567 		break;
1568 	}
1569 
1570 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1571 		flags = BUS_DMASYNC_PREREAD;
1572 	else
1573 		flags = 0;
1574 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1575 		flags |= BUS_DMASYNC_PREWRITE;
1576 
1577 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1578 	    ccb->ccb_datasize, flags);
1579 	return (0);
1580 }
1581 
1582 /*
1583  * Unmap the specified CCB's command block and data buffer (if any) and
1584  * perform DMA synchronisation.
1585  */
1586 void
1587 twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
1588 {
1589 	int flags, s;
1590 
1591 	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1592 		flags = BUS_DMASYNC_POSTREAD;
1593 	else
1594 		flags = 0;
1595 	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
1596 		flags |= BUS_DMASYNC_POSTWRITE;
1597 
1598 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
1599 	    ccb->ccb_datasize, flags);
1600 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
1601 
1602 	if (ccb->ccb_abuf != (vaddr_t)0) {
1603 		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
1604 			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
1605 			    ccb->ccb_datasize);
1606 		s = splvm();
1607 		/* XXX */
1608 		uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
1609 		    ccb->ccb_datasize);
1610 		splx(s);
1611 	}
1612 }
1613 
1614 /*
1615  * Submit a command to the controller and poll on completion.  Return
1616  * non-zero on timeout (but don't check status, as some command types don't
1617  * return status).  Must be called with interrupts blocked.
1618  */
1619 int
1620 twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
1621 {
1622 	int rv;
1623 
1624 	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
1625 		return (rv);
1626 
1627 	for (timo *= 1000; timo != 0; timo--) {
1628 		twe_poll(sc);
1629 		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
1630 			break;
1631 		DELAY(100);
1632 	}
1633 
1634 	return (timo == 0);
1635 }
1636 
1637 /*
1638  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
1639  * the order that they were enqueued and try to submit their command blocks
1640  * to the controller for execution.
1641  */
1642 void
1643 twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
1644 {
1645 	int s;
1646 
1647 	s = splbio();
1648 
1649 	if (ccb != NULL)
1650 		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
1651 
1652 	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
1653 		if (twe_ccb_submit(sc, ccb))
1654 			break;
1655 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
1656 	}
1657 
1658 	splx(s);
1659 }
1660 
1661 /*
1662  * Submit the command block associated with the specified CCB to the
1663  * controller for execution.  Must be called with interrupts blocked.
1664  */
1665 int
1666 twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
1667 {
1668 	bus_addr_t pa;
1669 	int rv;
1670 	u_int status;
1671 
1672 	/* Check to see if we can post a command. */
1673 	status = twe_inl(sc, TWE_REG_STS);
1674 	twe_status_check(sc, status);
1675 
1676 	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
1677 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1678 		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
1679 		    sizeof(struct twe_cmd),
1680 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1681 #ifdef DIAGNOSTIC
1682 		if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
1683 			panic("%s: CCB %ld not ALLOCED\n",
1684 			    device_xname(sc->sc_dev), (long)(ccb - sc->sc_ccbs));
1685 #endif
1686 		ccb->ccb_flags |= TWE_CCB_ACTIVE;
1687 		pa = sc->sc_cmds_paddr +
1688 		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
1689 		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
1690 		rv = 0;
1691 	} else
1692 		rv = EBUSY;
1693 
1694 	return (rv);
1695 }
1696 
1697 
1698 /*
1699  * Accept an open operation on the control device.
1700  */
1701 static int
1702 tweopen(dev_t dev, int flag, int mode, struct lwp *l)
1703 {
1704 	struct twe_softc *twe;
1705 
1706 	if ((twe = device_lookup_private(&twe_cd, minor(dev))) == NULL)
1707 		return (ENXIO);
1708 	if ((twe->sc_flags & TWEF_OPEN) != 0)
1709 		return (EBUSY);
1710 
1711 	twe->sc_flags |= TWEF_OPEN;
1712 	return (0);
1713 }
1714 
1715 /*
1716  * Accept the last close on the control device.
1717  */
1718 static int
1719 tweclose(dev_t dev, int flag, int mode,
1720     struct lwp *l)
1721 {
1722 	struct twe_softc *twe;
1723 
1724 	twe = device_lookup_private(&twe_cd, minor(dev));
1725 	twe->sc_flags &= ~TWEF_OPEN;
1726 	return (0);
1727 }
1728 
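/*
 * Completion handler used by tweioctl(): the issuing thread sleeps on the
 * CCB and is woken here once the command has completed.
 */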
1729 void
1730 twe_ccb_wait_handler(struct twe_ccb *ccb, int error)
1731 {
1732 
1733 	/* Just wake up the sleeper. */
1734 	wakeup(ccb);
1735 }
1736 
1737 /*
1738  * Handle control operations.
1739  */
1740 static int
1741 tweioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1742 {
1743 	struct twe_softc *twe;
1744 	struct twe_ccb *ccb;
1745 	struct twe_param *param;
1746 	struct twe_usercommand *tu;
1747 	struct twe_paramcommand *tp;
1748 	struct twe_drivecommand *td;
1749 	void *pdata = NULL;
1750 	int s, error = 0;
1751 	u_int8_t cmdid;
1752 
1753 	twe = device_lookup_private(&twe_cd, minor(dev));
1754 	tu = (struct twe_usercommand *)data;
1755 	tp = (struct twe_paramcommand *)data;
1756 	td = (struct twe_drivecommand *)data;
1757 
1758 	/* This is intended to be compatible with the FreeBSD interface. */
1759 	switch (cmd) {
1760 	case TWEIO_COMMAND:
1761 		error = kauth_authorize_device_passthru(l->l_cred, dev,
1762 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
1763 		if (error)
1764 			return (error);
1765 
1766 		/* XXX mutex */
1767 		if (tu->tu_size > 0) {
1768 			/*
1769 			 * XXX Handle > TWE_SECTOR_SIZE?  Let's see if
1770 			 * it's really necessary, first.
1771 			 */
1772 			if (tu->tu_size > TWE_SECTOR_SIZE) {
1773 #ifdef TWE_DEBUG
1774 				printf("%s: TWEIO_COMMAND: tu_size = %zu\n",
1775 				    device_xname(twe->sc_dev), tu->tu_size);
1776 #endif
1777 				return EINVAL;
1778 			}
1779 			pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
1780 			error = copyin(tu->tu_data, pdata, tu->tu_size);
1781 			if (error != 0)
1782 				goto done;
1783 			ccb = twe_ccb_alloc_wait(twe,
1784 			    TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
1785 			KASSERT(ccb != NULL);
1786 			ccb->ccb_data = pdata;
1787 			ccb->ccb_datasize = TWE_SECTOR_SIZE;
1788 		} else {
1789 			ccb = twe_ccb_alloc_wait(twe, 0);
1790 			KASSERT(ccb != NULL);
1791 		}
1792 
1793 		ccb->ccb_tx.tx_handler = twe_ccb_wait_handler;
1794 		ccb->ccb_tx.tx_context = NULL;
1795 		ccb->ccb_tx.tx_dv = twe->sc_dev;
1796 
1797 		cmdid = ccb->ccb_cmdid;
1798 		memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
1799 		ccb->ccb_cmd->tc_cmdid = cmdid;
1800 
1801 		/* Map the transfer. */
1802 		if ((error = twe_ccb_map(twe, ccb)) != 0) {
1803 			twe_ccb_free(twe, ccb);
1804 			goto done;
1805 		}
1806 
1807 		/* Submit the command and wait up to 1 minute. */
1808 		error = 0;
1809 		twe_ccb_enqueue(twe, ccb);
1810 		s = splbio();
1811 		while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
1812 			if ((error = tsleep(ccb, PRIBIO, "tweioctl",
1813 					    60 * hz)) != 0)
1814 				break;
1815 		splx(s);
1816 
1817 		/* Copy the command back to the ioctl argument. */
1818 		memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
1819 #ifdef TWE_DEBUG
1820 		printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
1821 		    "tc_status = 0x%02x\n", device_xname(twe->sc_dev),
1822 		    tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
1823 #endif
1824 
1825 		s = splbio();
1826 		twe_ccb_free(twe, ccb);
1827 		splx(s);
1828 
1829 		if (tu->tu_size > 0)
1830 			error = copyout(pdata, tu->tu_data, tu->tu_size);
1831 		goto done;
1832 
1833 	case TWEIO_STATS:
1834 		return (ENOENT);
1835 
1836 	case TWEIO_AEN_POLL:
1837 		s = splbio();
1838 		*(u_int *)data = twe_aen_dequeue(twe);
1839 		splx(s);
1840 		return (0);
1841 
1842 	case TWEIO_AEN_WAIT:
1843 		s = splbio();
1844 		while ((*(u_int *)data =
1845 		    twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
1846 			twe->sc_flags |= TWEF_AENQ_WAIT;
1847 			error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
1848 			    "tweaen", 0);
1849 			if (error == EINTR) {
1850 				splx(s);
1851 				return (error);
1852 			}
1853 		}
1854 		splx(s);
1855 		return (0);
1856 
1857 	case TWEIO_GET_PARAM:
1858 		error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
1859 		    tp->tp_size, 0, &param);
1860 		if (error != 0)
1861 			return (error);
1862 		if (param->tp_param_size > tp->tp_size) {
1863 			error = EFAULT;
			free(param, M_DEVBUF);
1864 			goto done;
1865 		}
1866 		error = copyout(param->tp_data, tp->tp_data,
1867 		    param->tp_param_size);
1868 		free(param, M_DEVBUF);
1869 		goto done;
1870 
1871 	case TWEIO_SET_PARAM:
1872 		pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
1873 		if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
1874 			goto done;
1875 		error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
1876 		    tp->tp_size, pdata);
1877 		goto done;
1878 
1879 	case TWEIO_RESET:
1880 		s = splbio();
1881 		twe_reset(twe);
1882 		splx(s);
1883 		return (0);
1884 
1885 	case TWEIO_ADD_UNIT:
1886 		/* XXX mutex */
1887 		return (twe_add_unit(twe, td->td_unit));
1888 
1889 	case TWEIO_DEL_UNIT:
1890 		/* XXX mutex */
1891 		return (twe_del_unit(twe, td->td_unit));
1892 
1893 	default:
1894 		return EINVAL;
1895 	}
1896 done:
1897 	if (pdata)
1898 		free(pdata, M_DEVBUF);
1899 	return error;
1900 }
1901 
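/*
 * Character device entry points for the control device used by the
 * management tools (see tweopen(), tweclose() and tweioctl() above).
 */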
1902 const struct cdevsw twe_cdevsw = {
1903 	.d_open = tweopen,
1904 	.d_close = tweclose,
1905 	.d_read = noread,
1906 	.d_write = nowrite,
1907 	.d_ioctl = tweioctl,
1908 	.d_stop = nostop,
1909 	.d_tty = notty,
1910 	.d_poll = nopoll,
1911 	.d_mmap = nommap,
1912 	.d_kqfilter = nokqfilter,
1913 	.d_discard = nodiscard,
1914 	.d_flag = D_OTHER
1915 };
1916 
1917 /*
1918  * Print some information about the controller.
1919  */
1920 static void
1921 twe_describe_controller(struct twe_softc *sc)
1922 {
1923 	struct twe_param *p[6];
1924 	int i, rv = 0;
1925 	uint32_t dsize;
1926 	uint8_t ports;
1927 
1928 	ports = 0;
1929 
1930 	/* get the port count */
1931 	rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
1932 		TWE_PARAM_CONTROLLER_PortCount, &ports);
1933 
1934 	/* get version strings */
1935 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
1936 		16, NULL, &p[0]);
1937 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
1938 		16, NULL, &p[1]);
1939 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
1940 		16, NULL, &p[2]);
1941 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
1942 		8, NULL, &p[3]);
1943 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
1944 		8, NULL, &p[4]);
1945 	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
1946 		8, NULL, &p[5]);
1947 
1948 	if (rv) {
1949 		/* some error occurred */
1950 		aprint_error_dev(sc->sc_dev,
1951 		    "failed to fetch version information\n");
1952 		return;
1953 	}
1954 
1955 	aprint_normal_dev(sc->sc_dev, "%d ports, Firmware %.16s, BIOS %.16s\n",
1956 	    ports, p[1]->tp_data, p[2]->tp_data);
1957 
1958 	aprint_verbose_dev(sc->sc_dev,
1959 	    "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
1960 	    p[0]->tp_data, p[3]->tp_data,
1961 	    p[4]->tp_data, p[5]->tp_data);
1962 
1963 	free(p[0], M_DEVBUF);
1964 	free(p[1], M_DEVBUF);
1965 	free(p[2], M_DEVBUF);
1966 	free(p[3], M_DEVBUF);
1967 	free(p[4], M_DEVBUF);
1968 	free(p[5], M_DEVBUF);
1969 
1970 	rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
1971 	    TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
1972 	if (rv) {
1973 		aprint_error_dev(sc->sc_dev,
1974 		    "failed to get drive status summary\n");
1975 		return;
1976 	}
1977 	for (i = 0; i < ports; i++) {
1978 		if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
1979 			continue;
1980 		rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
1981 		    TWE_PARAM_DRIVEINFO_Size, &dsize);
1982 		if (rv) {
1983 			aprint_error_dev(sc->sc_dev,
1984 			    "unable to get drive size for port %d\n", i);
1985 			continue;
1986 		}
1987 		rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
1988 		    TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
1989 		if (rv) {
1990 			aprint_error_dev(sc->sc_dev,
1991 			    "unable to get drive model for port %d\n", i);
1992 			continue;
1993 		}
1994 		aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
1995 		    i, p[1]->tp_data, dsize / 2048);
1996 		free(p[1], M_DEVBUF);
1997 	}
1998 	free(p[0], M_DEVBUF);
1999 }
2000 
2001 MODULE(MODULE_CLASS_DRIVER, twe, "pci");
2002 
2003 #ifdef _MODULE
2004 #include "ioconf.c"
2005 #endif
2006 
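/*
 * Module control: hook the driver's autoconfiguration data up when it is
 * loaded as a module, and tear it down again on unload.
 */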
2007 static int
2008 twe_modcmd(modcmd_t cmd, void *opaque)
2009 {
2010 	int error = 0;
2011 
2012 #ifdef _MODULE
2013 	switch (cmd) {
2014 	case MODULE_CMD_INIT:
2015 		error = config_init_component(cfdriver_ioconf_twe,
2016 		    cfattach_ioconf_twe, cfdata_ioconf_twe);
2017 		break;
2018 	case MODULE_CMD_FINI:
2019 		error = config_fini_component(cfdriver_ioconf_twe,
2020 		    cfattach_ioconf_twe, cfdata_ioconf_twe);
2021 		break;
2022 	default:
2023 		error = ENOTTY;
2024 		break;
2025 	}
2026 #endif
2027 
2028 	return error;
2029 }
2030