xref: /netbsd-src/sys/dev/pci/twa.c (revision fad4c9f71477ae11cea2ee75ec82151ac770a534)
1 /*	$NetBSD: twa.c,v 1.3 2006/05/25 01:37:08 wrstuden Exp $ */
2 /*	$wasabi: twa.c,v 1.25 2006/05/01 15:16:59 simonb Exp $	*/
3 /*
4  * Copyright (c) 2004-2006 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Your Wasabi Systems License Agreement specifies the terms and
8  * conditions for use and redistribution.
9  */
10 
11 /*-
12  * Copyright (c) 2004 The NetBSD Foundation, Inc.
13  * All rights reserved.
14  *
15  * This code is derived from software contributed to The NetBSD Foundation
16  * by Jordan Rhody of Wasabi Systems, Inc.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions
20  * are met:
21  * 1. Redistributions of source code must retain the above copyright
22  *    notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *    notice, this list of conditions and the following disclaimer in the
25  *    documentation and/or other materials provided with the distribution.
26  * 3. All advertising materials mentioning features or use of this software
27  *    must display the following acknowledgement:
28  *        This product includes software developed by the NetBSD
29  *        Foundation, Inc. and its contributors.
30  * 4. Neither the name of The NetBSD Foundation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
35  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
36  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
37  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
38  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
41  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
42  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
44  * POSSIBILITY OF SUCH DAMAGE.
45  */
46 
47 /*-
48  * Copyright (c) 2003-04 3ware, Inc.
49  * Copyright (c) 2000 Michael Smith
50  * Copyright (c) 2000 BSDi
51  * All rights reserved.
52  *
53  * Redistribution and use in source and binary forms, with or without
54  * modification, are permitted provided that the following conditions
55  * are met:
56  * 1. Redistributions of source code must retain the above copyright
57  *    notice, this list of conditions and the following disclaimer.
58  * 2. Redistributions in binary form must reproduce the above copyright
59  *    notice, this list of conditions and the following disclaimer in the
60  *    documentation and/or other materials provided with the distribution.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	$FreeBSD: src/sys/dev/twa/twa.c,v 1.2 2004/04/02 15:09:57 des Exp $
75  */
76 
77 /*
78  * 3ware driver for 9000 series storage controllers.
79  *
80  * Author: Vinod Kashyap
81  */
82 
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: twa.c,v 1.3 2006/05/25 01:37:08 wrstuden Exp $");
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/device.h>
90 #include <sys/queue.h>
91 #include <sys/proc.h>
92 #include <sys/bswap.h>
93 #include <sys/buf.h>
94 #include <sys/bufq.h>
95 #include <sys/endian.h>
96 #include <sys/malloc.h>
97 #include <sys/conf.h>
98 #include <sys/disk.h>
99 #include <sys/syslog.h>
100 
101 #include <uvm/uvm_extern.h>
102 
103 #include <machine/bus.h>
104 
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/twareg.h>
109 #include <dev/pci/twavar.h>
110 #include <dev/pci/twaio.h>
111 
112 #include <dev/scsipi/scsipi_all.h>
113 #include <dev/scsipi/scsipi_disk.h>
114 #include <dev/scsipi/scsipiconf.h>
115 #include <dev/scsipi/scsi_spc.h>
116 
117 #include <dev/ldvar.h>
118 
119 #include "locators.h"
120 
121 #define	PCI_CBIO	0x10
122 
123 static int	twa_fetch_aen(struct twa_softc *);
124 static void	twa_aen_callback(struct twa_request *);
125 static int	twa_find_aen(struct twa_softc *sc, u_int16_t);
126 static uint16_t	twa_enqueue_aen(struct twa_softc *sc,
127 			struct twa_command_header *);
128 
129 static void	twa_attach(struct device *, struct device *, void *);
130 static void	twa_shutdown(void *);
131 static int	twa_init_connection(struct twa_softc *, u_int16_t, u_int32_t,
132 				    u_int16_t, u_int16_t, u_int16_t, u_int16_t, u_int16_t *,
133 					u_int16_t *, u_int16_t *, u_int16_t *, u_int32_t *);
134 static int	twa_intr(void *);
135 static int 	twa_match(struct device *, struct cfdata *, void *);
136 static int	twa_reset(struct twa_softc *);
137 
138 static int	twa_print(void *, const char *);
139 static int	twa_soft_reset(struct twa_softc *);
140 
141 static int	twa_check_ctlr_state(struct twa_softc *, u_int32_t);
142 static int	twa_get_param(struct twa_softc *, int, int, size_t,
143 				void (* callback)(struct twa_request *),
144 				struct twa_param_9k **);
145 static int 	twa_set_param(struct twa_softc *, int, int, int, void *,
146 				void (* callback)(struct twa_request *));
147 static void	twa_describe_controller(struct twa_softc *);
148 static int	twa_wait_status(struct twa_softc *, u_int32_t, u_int32_t);
149 static int	twa_done(struct twa_softc *);
150 #if 0
151 static int	twa_flash_firmware(struct twa_softc *sc);
152 static int	twa_hard_reset(struct twa_softc *sc);
153 #endif
154 
155 extern struct	cfdriver twa_cd;
156 extern uint32_t twa_fw_img_size;
157 extern uint8_t	twa_fw_img[];
158 
159 CFATTACH_DECL(twa, sizeof(struct twa_softc),
160     twa_match, twa_attach, NULL, NULL);
161 
162 /* AEN messages. */
163 static const struct twa_message	twa_aen_table[] = {
164 	{0x0000, "AEN queue empty"},
165 	{0x0001, "Controller reset occurred"},
166 	{0x0002, "Degraded unit detected"},
167 	{0x0003, "Controller error occurred"},
168 	{0x0004, "Background rebuild failed"},
169 	{0x0005, "Background rebuild done"},
170 	{0x0006, "Incomplete unit detected"},
171 	{0x0007, "Background initialize done"},
172 	{0x0008, "Unclean shutdown detected"},
173 	{0x0009, "Drive timeout detected"},
174 	{0x000A, "Drive error detected"},
175 	{0x000B, "Rebuild started"},
176 	{0x000C, "Background initialize started"},
177 	{0x000D, "Entire logical unit was deleted"},
178 	{0x000E, "Background initialize failed"},
179 	{0x000F, "SMART attribute exceeded threshold"},
180 	{0x0010, "Power supply reported AC under range"},
181 	{0x0011, "Power supply reported DC out of range"},
182 	{0x0012, "Power supply reported a malfunction"},
183 	{0x0013, "Power supply predicted malfunction"},
184 	{0x0014, "Battery charge is below threshold"},
185 	{0x0015, "Fan speed is below threshold"},
186 	{0x0016, "Temperature sensor is above threshold"},
187 	{0x0017, "Power supply was removed"},
188 	{0x0018, "Power supply was inserted"},
189 	{0x0019, "Drive was removed from a bay"},
190 	{0x001A, "Drive was inserted into a bay"},
191 	{0x001B, "Drive bay cover door was opened"},
192 	{0x001C, "Drive bay cover door was closed"},
193 	{0x001D, "Product case was opened"},
194 	{0x0020, "Prepare for shutdown (power-off)"},
195 	{0x0021, "Downgrade UDMA mode to lower speed"},
196 	{0x0022, "Upgrade UDMA mode to higher speed"},
197 	{0x0023, "Sector repair completed"},
198 	{0x0024, "Sbuf memory test failed"},
199 	{0x0025, "Error flushing cached write data to disk"},
200 	{0x0026, "Drive reported data ECC error"},
201 	{0x0027, "DCB has checksum error"},
202 	{0x0028, "DCB version is unsupported"},
203 	{0x0029, "Background verify started"},
204 	{0x002A, "Background verify failed"},
205 	{0x002B, "Background verify done"},
206 	{0x002C, "Bad sector overwritten during rebuild"},
207 	{0x002E, "Replace failed because replacement drive too small"},
208 	{0x002F, "Verify failed because array was never initialized"},
209 	{0x0030, "Unsupported ATA drive"},
210 	{0x0031, "Synchronize host/controller time"},
211 	{0x0032, "Spare capacity is inadequate for some units"},
212 	{0x0033, "Background migration started"},
213 	{0x0034, "Background migration failed"},
214 	{0x0035, "Background migration done"},
215 	{0x0036, "Verify detected and fixed data/parity mismatch"},
216 	{0x0037, "SO-DIMM incompatible"},
217 	{0x0038, "SO-DIMM not detected"},
218 	{0x0039, "Corrected Sbuf ECC error"},
219 	{0x003A, "Drive power on reset detected"},
220 	{0x003B, "Background rebuild paused"},
221 	{0x003C, "Background initialize paused"},
222 	{0x003D, "Background verify paused"},
223 	{0x003E, "Background migration paused"},
224 	{0x003F, "Corrupt flash file system detected"},
225 	{0x0040, "Flash file system repaired"},
226 	{0x0041, "Unit number assignments were lost"},
227 	{0x0042, "Error during read of primary DCB"},
228 	{0x0043, "Latent error found in backup DCB"},
229 	{0x0044, "Battery voltage is normal"},
230 	{0x0045, "Battery voltage is low"},
231 	{0x0046, "Battery voltage is high"},
232 	{0x0047, "Battery voltage is too low"},
233 	{0x0048, "Battery voltage is too high"},
234 	{0x0049, "Battery temperature is normal"},
235 	{0x004A, "Battery temperature is low"},
236 	{0x004B, "Battery temperature is high"},
237 	{0x004C, "Battery temperature is too low"},
238 	{0x004D, "Battery temperature is too high"},
239 	{0x004E, "Battery capacity test started"},
240 	{0x004F, "Cache synchronization skipped"},
241 	{0x0050, "Battery capacity test completed"},
242 	{0x0051, "Battery health check started"},
243 	{0x0052, "Battery health check completed"},
244 	{0x0053, "Need to do a capacity test"},
245 	{0x0054, "Charge termination voltage is at high level"},
246 	{0x0055, "Battery charging started"},
247 	{0x0056, "Battery charging completed"},
248 	{0x0057, "Battery charging fault"},
249 	{0x0058, "Battery capacity is below warning level"},
250 	{0x0059, "Battery capacity is below error level"},
251 	{0x005A, "Battery is present"},
252 	{0x005B, "Battery is not present"},
253 	{0x005C, "Battery is weak"},
254 	{0x005D, "Battery health check failed"},
255 	{0x005E, "Cache synchronized after power fail"},
256 	{0x005F, "Cache synchronization failed; some data lost"},
257 	{0x0060, "Bad cache meta data checksum"},
258 	{0x0061, "Bad cache meta data signature"},
259 	{0x0062, "Cache meta data restore failed"},
260 	{0x0063, "BBU not found after power fail"},
261 	{0x00FC, "Recovered/finished array membership update"},
262 	{0x00FD, "Handler lockup"},
263 	{0x00FE, "Retrying PCI transfer"},
264 	{0x00FF, "AEN queue is full"},
265 	{0xFFFFFFFF, (char *)NULL}
266 };
267 
268 /* AEN severity table. */
269 static const char	*twa_aen_severity_table[] = {
270 	"None",
271 	"ERROR",
272 	"WARNING",
273 	"INFO",
274 	"DEBUG",
275 	(char *)NULL
276 };
277 
278 /* Error messages. */
279 static const struct twa_message	twa_error_table[] = {
280 	{0x0100, "SGL entry contains zero data"},
281 	{0x0101, "Invalid command opcode"},
282 	{0x0102, "SGL entry has unaligned address"},
283 	{0x0103, "SGL size does not match command"},
284 	{0x0104, "SGL entry has illegal length"},
285 	{0x0105, "Command packet is not aligned"},
286 	{0x0106, "Invalid request ID"},
287 	{0x0107, "Duplicate request ID"},
288 	{0x0108, "ID not locked"},
289 	{0x0109, "LBA out of range"},
290 	{0x010A, "Logical unit not supported"},
291 	{0x010B, "Parameter table does not exist"},
292 	{0x010C, "Parameter index does not exist"},
293 	{0x010D, "Invalid field in CDB"},
294 	{0x010E, "Specified port has invalid drive"},
295 	{0x010F, "Parameter item size mismatch"},
296 	{0x0110, "Failed memory allocation"},
297 	{0x0111, "Memory request too large"},
298 	{0x0112, "Out of memory segments"},
299 	{0x0113, "Invalid address to deallocate"},
300 	{0x0114, "Out of memory"},
301 	{0x0115, "Out of heap"},
302 	{0x0120, "Double degrade"},
303 	{0x0121, "Drive not degraded"},
304 	{0x0122, "Reconstruct error"},
305 	{0x0123, "Replace not accepted"},
306 	{0x0124, "Replace drive capacity too small"},
307 	{0x0125, "Sector count not allowed"},
308 	{0x0126, "No spares left"},
309 	{0x0127, "Reconstruct error"},
310 	{0x0128, "Unit is offline"},
311 	{0x0129, "Cannot update status to DCB"},
312 	{0x0130, "Invalid stripe handle"},
313 	{0x0131, "Handle that was not locked"},
314 	{0x0132, "Handle that was not empty"},
315 	{0x0133, "Handle has different owner"},
316 	{0x0140, "IPR has parent"},
317 	{0x0150, "Illegal Pbuf address alignment"},
318 	{0x0151, "Illegal Pbuf transfer length"},
319 	{0x0152, "Illegal Sbuf address alignment"},
320 	{0x0153, "Illegal Sbuf transfer length"},
321 	{0x0160, "Command packet too large"},
322 	{0x0161, "SGL exceeds maximum length"},
323 	{0x0162, "SGL has too many entries"},
324 	{0x0170, "Insufficient resources for rebuilder"},
325 	{0x0171, "Verify error (data != parity)"},
326 	{0x0180, "Requested segment not in directory of this DCB"},
327 	{0x0181, "DCB segment has unsupported version"},
328 	{0x0182, "DCB segment has checksum error"},
329 	{0x0183, "DCB support (settings) segment invalid"},
330 	{0x0184, "DCB UDB (unit descriptor block) segment invalid"},
331 	{0x0185, "DCB GUID (globally unique identifier) segment invalid"},
332 	{0x01A0, "Could not clear Sbuf"},
333 	{0x01C0, "Flash identify failed"},
334 	{0x01C1, "Flash out of bounds"},
335 	{0x01C2, "Flash verify error"},
336 	{0x01C3, "Flash file object not found"},
337 	{0x01C4, "Flash file already present"},
338 	{0x01C5, "Flash file system full"},
339 	{0x01C6, "Flash file not present"},
340 	{0x01C7, "Flash file size error"},
341 	{0x01C8, "Bad flash file checksum"},
342 	{0x01CA, "Corrupt flash file system detected"},
343 	{0x01D0, "Invalid field in parameter list"},
344 	{0x01D1, "Parameter list length error"},
345 	{0x01D2, "Parameter item is not changeable"},
346 	{0x01D3, "Parameter item is not saveable"},
347 	{0x0200, "UDMA CRC error"},
348 	{0x0201, "Internal CRC error"},
349 	{0x0202, "Data ECC error"},
350 	{0x0203, "ADP level 1 error"},
351 	{0x0204, "Port timeout"},
352 	{0x0205, "Drive power on reset"},
353 	{0x0206, "ADP level 2 error"},
354 	{0x0207, "Soft reset failed"},
355 	{0x0208, "Drive not ready"},
356 	{0x0209, "Unclassified port error"},
357 	{0x020A, "Drive aborted command"},
358 	{0x0210, "Internal CRC error"},
359 	{0x0211, "Host PCI bus abort"},
360 	{0x0212, "Host PCI parity error"},
361 	{0x0213, "Port handler error"},
362 	{0x0214, "Token interrupt count error"},
363 	{0x0215, "Timeout waiting for PCI transfer"},
364 	{0x0216, "Corrected buffer ECC"},
365 	{0x0217, "Uncorrected buffer ECC"},
366 	{0x0230, "Unsupported command during flash recovery"},
367 	{0x0231, "Next image buffer expected"},
368 	{0x0232, "Binary image architecture incompatible"},
369 	{0x0233, "Binary image has no signature"},
370 	{0x0234, "Binary image has bad checksum"},
371 	{0x0235, "Image downloaded overflowed buffer"},
372 	{0x0240, "I2C device not found"},
373 	{0x0241, "I2C transaction aborted"},
374 	{0x0242, "SO-DIMM parameter(s) incompatible using defaults"},
375 	{0x0243, "SO-DIMM unsupported"},
376 	{0x0248, "SPI transfer status error"},
377 	{0x0249, "SPI transfer timeout error"},
378 	{0x0250, "Invalid unit descriptor size in CreateUnit"},
379 	{0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"},
380 	{0x0252, "Invalid value in CreateUnit descriptor"},
381 	{0x0253, "Inadequate disk space to support descriptor in CreateUnit"},
382 	{0x0254, "Unable to create data channel for this unit descriptor"},
383 	{0x0255, "CreateUnit descriptor specifies a drive already in use"},
384        {0x0256, "Unable to write configuration to all disks during CreateUnit"},
385 	{0x0257, "CreateUnit does not support this descriptor version"},
386 	{0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"},
387 	{0x0259, "Too many descriptors in CreateUnit"},
388 	{0x025A, "Invalid configuration specified in CreateUnit descriptor"},
389 	{0x025B, "Invalid LBA offset specified in CreateUnit descriptor"},
390 	{0x025C, "Invalid stripelet size specified in CreateUnit descriptor"},
391 	{0x0260, "SMART attribute exceeded threshold"},
392 	{0xFFFFFFFF, (char *)NULL}
393 };
394 
395 struct twa_pci_identity {
396 	uint32_t	vendor_id;
397 	uint32_t	product_id;
398 	const char	*name;
399 };
400 
401 static const struct twa_pci_identity pci_twa_products[] = {
402 	{ PCI_VENDOR_3WARE,
403 	  PCI_PRODUCT_3WARE_9000,
404 	  "3ware 9000 series",
405 	},
406 	{ PCI_VENDOR_3WARE,
407 	  PCI_PRODUCT_3WARE_9550,
408 	  "3ware 9550SX series",
409 	},
410 	{ 0,
411 	  0,
412 	  NULL,
413 	},
414 };
415 
416 
417 static inline void
418 twa_outl(struct twa_softc *sc, int off, u_int32_t val)
419 {
420 	bus_space_write_4(sc->twa_bus_iot, sc->twa_bus_ioh, off, val);
421 	bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
422 	    BUS_SPACE_BARRIER_WRITE);
423 }
424 
425 
426 static inline u_int32_t	twa_inl(struct twa_softc *sc, int off)
427 {
428 	bus_space_barrier(sc->twa_bus_iot, sc->twa_bus_ioh, off, 4,
429 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
430 	return (bus_space_read_4(sc->twa_bus_iot, sc->twa_bus_ioh, off));
431 }
432 
433 void
434 twa_request_wait_handler(struct twa_request *tr)
435 {
436 	wakeup(tr);
437 }
438 
439 
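/*
 * Autoconfiguration match routine: accept any PCI device found in the
 * supported 3ware 9000-series product table.
 */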
440 static int
441 twa_match(struct device *parent, struct cfdata *cfdata, void *aux)
442 {
443 	int i;
444 	struct pci_attach_args *pa = aux;
445 	const struct twa_pci_identity *entry = 0;
446 
447 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE) {
448 		for (i = 0; (pci_twa_products[i].product_id); i++) {
449 			entry = &pci_twa_products[i];
450 			if (entry->product_id == PCI_PRODUCT(pa->pa_id)) {
451 				aprint_normal("%s: (rev. 0x%02x)\n",
452 				    entry->name, PCI_REVISION(pa->pa_class));
453 				return (1);
454 			}
455 		}
456 	}
457 	return (0);
458 }
459 
460 
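/*
 * Look up the message string for a given AEN/error code; returns the
 * table's NULL sentinel message if the code is not found.
 */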
461 static const char *
462 twa_find_msg_string(const struct twa_message *table, u_int16_t code)
463 {
464 	int	i;
465 
466 	for (i = 0; table[i].message != NULL; i++)
467 		if (table[i].code == code)
468 			return(table[i].message);
469 
470 	return(table[i].message);
471 }
472 
473 
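/*
 * Return a request packet to the free list (waking up anyone waiting for
 * a free request), or simply mark the dedicated AEN request as no longer
 * busy.
 */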
474 void
475 twa_release_request(struct twa_request *tr)
476 {
477 	int s;
478 	struct twa_softc *sc;
479 
480 	sc = tr->tr_sc;
481 
482 	if ((tr->tr_flags & TWA_CMD_AEN) == 0) {
483 		s = splbio();
484 		TAILQ_INSERT_TAIL(&tr->tr_sc->twa_free, tr, tr_link);
485 		splx(s);
486 		if (__predict_false((tr->tr_sc->twa_sc_flags &
487 		    TWA_STATE_REQUEST_WAIT) != 0)) {
488 			tr->tr_sc->twa_sc_flags &= ~TWA_STATE_REQUEST_WAIT;
489 			wakeup(&sc->twa_free);
490 		}
491 	} else
492 		tr->tr_flags &= ~TWA_CMD_AEN_BUSY;
493 }
494 
495 
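/*
 * Undo the DMA mapping of a completed request: sync and unload the data
 * map, copy bounced read data back into the caller's buffer, and free
 * any alignment (bounce) buffer that was used.
 */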
496 static void
497 twa_unmap_request(struct twa_request *tr)
498 {
499 	struct twa_softc	*sc = tr->tr_sc;
500 	u_int8_t		cmd_status;
501 
502 	/* If the command involved data, unmap that too. */
503 	if (tr->tr_data != NULL) {
504 		if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K)
505 			cmd_status = tr->tr_command->command.cmd_pkt_9k.status;
506 		else
507 			cmd_status =
508 			      tr->tr_command->command.cmd_pkt_7k.generic.status;
509 
510 		if (tr->tr_flags & TWA_CMD_DATA_OUT) {
511 			bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
512 				0, tr->tr_length, BUS_DMASYNC_POSTREAD);
513 			/*
514 			 * If we are using a bounce buffer, and we are reading
515 			 * data, copy the real data in.
516 			 */
517 			if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
518 				if (cmd_status == 0)
519 					memcpy(tr->tr_real_data, tr->tr_data,
520 						tr->tr_real_length);
521 		}
522 		if (tr->tr_flags & TWA_CMD_DATA_IN)
523 			bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
524 				0, tr->tr_length, BUS_DMASYNC_POSTWRITE);
525 
526 		bus_dmamap_unload(sc->twa_dma_tag, tr->tr_dma_map);
527 	}
528 
529 	/* Free alignment buffer if it was used. */
530 	if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
531 		uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
532 		    tr->tr_length, UVM_KMF_WIRED);
533 		tr->tr_data = tr->tr_real_data;
534 		tr->tr_length = tr->tr_real_length;
535 	}
536 }
537 
538 
539 /*
540  * Function name:	twa_wait_request
541  * Description:		Sends down a firmware cmd, and waits for the completion,
542  *			but NOT in a tight loop.
543  *
544  * Input:		tr	-- ptr to request pkt
545  *			timeout -- max # of seconds to wait before giving up
546  * Output:		None
547  * Return value:	0	-- success
548  *			non-zero-- failure
549  */
550 static int
551 twa_wait_request(struct twa_request *tr, u_int32_t timeout)
552 {
553 	time_t	end_time;
554 	struct timeval	t1;
555 	int	s, error;
556 
557 	tr->tr_flags |= TWA_CMD_SLEEP_ON_REQUEST;
558 	tr->tr_callback = twa_request_wait_handler;
559 	tr->tr_status = TWA_CMD_BUSY;
560 
561 	if ((error = twa_map_request(tr)))
562 		return (error);
563 
564 	microtime(&t1);
565 	end_time = t1.tv_usec +
566 		(timeout * 1000 * 100);
567 
568 	while (tr->tr_status != TWA_CMD_COMPLETE) {
569 		if ((error = tr->tr_error))
570 			return(error);
571 		if ((error = tsleep(tr, PRIBIO, "twawait", timeout * hz)) == 0) {
573 			error = (tr->tr_status != TWA_CMD_COMPLETE);
574 			break;
575 		}
576 		if (error == EWOULDBLOCK) {
577 			/*
578 			 * We will reset the controller only if the request has
579 			 * already been submitted, so as to not lose the
580 			 * request packet.  If a busy request timed out, the
581 			 * reset will take care of freeing resources.  If a
582 			 * pending request timed out, we will free resources
583 			 * for that request, right here.  So, the caller is
584 			 * expected to NOT cleanup when ETIMEDOUT is returned.
585 			 */
586 			if (tr->tr_status != TWA_CMD_PENDING &&
587 			    tr->tr_status != TWA_CMD_COMPLETE)
588 				twa_reset(tr->tr_sc);
589 			else {
590 				/* Request was never submitted.  Clean up. */
591 				s = splbio();
592 				TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
593 				splx(s);
594 
595 				twa_unmap_request(tr);
596 				if (tr->tr_data)
597 					free(tr->tr_data, M_DEVBUF);
598 
599 				twa_release_request(tr);
600 			}
601 			return(ETIMEDOUT);
602 		}
603 		/*
604 		 * Either the request got completed, or we were woken up by a
605 		 * signal.  Calculate the new timeout, in case it was the latter.
606 		 */
607 		microtime(&t1);
608 
609 		timeout = (end_time - t1.tv_usec) / (1000 * 100);
610 	}
611 	twa_unmap_request(tr);
612 	return(error);
613 }
614 
615 
616 /*
617  * Function name:	twa_immediate_request
618  * Description:		Sends down a firmware cmd, and waits for the completion
619  *			in a tight loop.
620  *
621  * Input:		tr	-- ptr to request pkt
622  *			timeout -- max # of seconds to wait before giving up
623  * Output:		None
624  * Return value:	0	-- success
625  *			non-zero-- failure
626  */
627 static int
628 twa_immediate_request(struct twa_request *tr, u_int32_t timeout)
629 {
630 	struct timeval t1;
631 	int	s = 0, error = 0;
632 
633 	if ((error = twa_map_request(tr))) {
634 		return(error);
635 	}
636 
637 	timeout = (timeout * 10000 * 10);
638 
639 	microtime(&t1);
640 
641 	timeout += t1.tv_usec;
642 
643 	do {
644 		if ((error = tr->tr_error))
645 			return(error);
646 		twa_done(tr->tr_sc);
647 		if ((tr->tr_status != TWA_CMD_BUSY) &&
648 			(tr->tr_status != TWA_CMD_PENDING)) {
649 			twa_unmap_request(tr);
650 			return(tr->tr_status != TWA_CMD_COMPLETE);
651 		}
652 		microtime(&t1);
653 	} while (t1.tv_usec <= timeout);
654 
655 	/*
656 	 * We will reset the controller only if the request has
657 	 * already been submitted, so as to not lose the
658 	 * request packet.  If a busy request timed out, the
659 	 * reset will take care of freeing resources.  If a
660 	 * pending request timed out, we will free resources
661 	 * for that request, right here.  So, the caller is
662 	 * expected to NOT cleanup when ETIMEDOUT is returned.
663 	 */
664 	if (tr->tr_status != TWA_CMD_PENDING)
665 		twa_reset(tr->tr_sc);
666 	else {
667 		/* Request was never submitted.  Clean up. */
668 		s = splbio();
669 		TAILQ_REMOVE(&tr->tr_sc->twa_pending, tr, tr_link);
670 		splx(s);
671 		twa_unmap_request(tr);
672 		if (tr->tr_data)
673 			free(tr->tr_data, M_DEVBUF);
674 
675 		twa_release_request(tr);
676 	}
677 	return(ETIMEDOUT);
678 }
679 
680 
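/*
 * Issue a SCSI INQUIRY (as a 9000-series passthrough command) to the
 * given unit, polling for completion.  Returns non-zero if the unit is
 * not present or the command fails.
 */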
681 static int
682 twa_inquiry(struct twa_request *tr, int lunid)
683 {
684 	int error;
685 	struct twa_command_9k *tr_9k_cmd;
686 
687 	if (tr->tr_data == NULL)
688 		return (ENOMEM);
689 
690 	memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
691 
692 	tr->tr_length = TWA_SECTOR_SIZE;
693 	tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
694 	tr->tr_flags |= TWA_CMD_DATA_IN;
695 
696 	tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
697 
698 	tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
699 	tr_9k_cmd->unit = lunid;
700 	tr_9k_cmd->request_id = tr->tr_request_id;
701 	tr_9k_cmd->status = 0;
702 	tr_9k_cmd->sgl_offset = 16;
703 	tr_9k_cmd->sgl_entries = 1;
704 	/* create the CDB here */
705 	tr_9k_cmd->cdb[0] = INQUIRY;
706 	tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
707 	tr_9k_cmd->cdb[4] = 255;
708 
709 	/* XXX Preset the inquiry data to "LUN not present".
710 	 * The 9000 series does not seem to report NOTPRESENT
711 	 * itself for absent units - needs more investigation.
712 	 */
713 	((struct scsipi_inquiry_data *)tr->tr_data)->device =
714 		SID_QUAL_LU_NOTPRESENT;
715 
716 	error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
717 
718 	if (((struct scsipi_inquiry_data *)tr->tr_data)->device ==
719 		SID_QUAL_LU_NOTPRESENT)
720 		error = 1;
721 
722 	return (error);
723 }
724 
725 static int
726 twa_print_inquiry_data(struct twa_softc *sc,
727 	struct scsipi_inquiry_data *scsipi)
728 {
729 	printf("%s: %s\n", sc->twa_dv.dv_xname, scsipi->vendor);
730 
731 	return (1);
732 }
733 
734 
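/*
 * Issue a SCSI READ CAPACITY (16) passthrough command to the given unit
 * and return the unit's size in sectors.
 */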
735 static uint64_t
736 twa_read_capacity(struct twa_request *tr, int lunid)
737 {
738 	int error;
739 	struct twa_command_9k *tr_9k_cmd;
740 	uint64_t array_size = 0LL;
741 
742 	if (tr->tr_data == NULL)
743 		return (ENOMEM);
744 
745 	memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
746 
747 	tr->tr_length = TWA_SECTOR_SIZE;
748 	tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
749 	tr->tr_flags |= TWA_CMD_DATA_OUT;
750 
751 	tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
752 
753 	tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
754 	tr_9k_cmd->unit = lunid;
755 	tr_9k_cmd->request_id = tr->tr_request_id;
756 	tr_9k_cmd->status = 0;
757 	tr_9k_cmd->sgl_offset = 16;
758 	tr_9k_cmd->sgl_entries = 1;
759 	/* create the CDB here */
760 	tr_9k_cmd->cdb[0] = READ_CAPACITY_16;
761 	tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e) | SRC16_SERVICE_ACTION;
762 
763 	error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
764 #if BYTE_ORDER == BIG_ENDIAN
765 	array_size = bswap64(_8btol(((struct scsipi_read_capacity_16_data *)
766 				tr->tr_data)->addr) + 1);
767 #else
768 	array_size = _8btol(((struct scsipi_read_capacity_16_data *)
769 				tr->tr_data)->addr) + 1;
770 #endif
771 	return (array_size);
772 }
773 
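/*
 * Issue a SCSI REQUEST SENSE passthrough command to the given unit.
 * For AEN requests (issued from interrupt context) the request is only
 * queued; otherwise completion is polled for.
 */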
774 static int
775 twa_request_sense(struct twa_request *tr, int lunid)
776 {
777 	int error = 1;
778 	struct twa_command_9k *tr_9k_cmd;
779 
780 	if (tr->tr_data == NULL)
781 		return (error);
782 
783 	memset(tr->tr_data, 0, TWA_SECTOR_SIZE);
784 
785 	tr->tr_length = TWA_SECTOR_SIZE;
786 	tr->tr_cmd_pkt_type = TWA_CMD_PKT_TYPE_9K;
787 	tr->tr_flags |= TWA_CMD_DATA_OUT;
788 
789 	tr_9k_cmd = &tr->tr_command->command.cmd_pkt_9k;
790 
791 	tr_9k_cmd->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
792 	tr_9k_cmd->unit = lunid;
793 	tr_9k_cmd->request_id = tr->tr_request_id;
794 	tr_9k_cmd->status = 0;
795 	tr_9k_cmd->sgl_offset = 16;
796 	tr_9k_cmd->sgl_entries = 1;
797 	/* create the CDB here */
798 	tr_9k_cmd->cdb[0] = SCSI_REQUEST_SENSE;
799 	tr_9k_cmd->cdb[1] = ((lunid << 5) & 0x0e);
800 	tr_9k_cmd->cdb[4] = 255;
801 
802 	/* XXX AEN notification is done in interrupt context,
803 	 * so just queue the request there and return from
804 	 * the interrupt as quickly as possible.
805 	 */
806 	if ((tr->tr_flags & TWA_CMD_AEN) != 0)
807 		error = twa_map_request(tr);
808  	else
809 		error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
810 
811 	return (error);
812 }
813 
814 
815 
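/*
 * Allocate and DMA-map the command packet area, then carve it up into
 * num_reqs request/command-packet pairs.  Request 0 is reserved for AEN
 * retrieval; the rest are placed on the free list.
 */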
816 static int
817 twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
818 {
819 	struct twa_request	*tr;
820 	struct twa_command_packet *tc;
821 	bus_dma_segment_t	seg;
822 	size_t max_segs, max_xfer;
823 	int	i, rv, rseg, size;
824 
825 	if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
826 					M_DEVBUF, M_NOWAIT)) == NULL)
827 		return(ENOMEM);
828 
829 	size = num_reqs * sizeof(struct twa_command_packet);
830 
831 	/* Allocate memory for cmd pkts. */
832 	if ((rv = bus_dmamem_alloc(sc->twa_dma_tag,
833 		size, PAGE_SIZE, 0, &seg,
834 		1, &rseg, BUS_DMA_NOWAIT)) != 0){
835 			aprint_error("%s: unable to allocate "
836 				"command packets, rv = %d\n",
837 				sc->twa_dv.dv_xname, rv);
838 			return (ENOMEM);
839 	}
840 
841 	if ((rv = bus_dmamem_map(sc->twa_dma_tag,
842 		&seg, rseg, size, (caddr_t *)&sc->twa_cmds,
843 		BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
844 			aprint_error("%s: unable to map commands, rv = %d\n",
845 				sc->twa_dv.dv_xname, rv);
846 			return (1);
847 	}
848 
849 	if ((rv = bus_dmamap_create(sc->twa_dma_tag,
850 		size, num_reqs, size,
851 		0, BUS_DMA_NOWAIT, &sc->twa_cmd_map)) != 0) {
852 			aprint_error("%s: unable to create command DMA map, "
853 				"rv = %d\n", sc->twa_dv.dv_xname, rv);
854 			return (ENOMEM);
855 	}
856 
857 	if ((rv = bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
858 		sc->twa_cmds, size, NULL,
859 		BUS_DMA_NOWAIT)) != 0) {
860 			aprint_error("%s: unable to load command DMA map, "
861 				"rv = %d\n", sc->twa_dv.dv_xname, rv);
862 			return (1);
863 	}
864 
865 	if ((uintptr_t)sc->twa_cmds % TWA_ALIGNMENT) {
866 		aprint_error("%s: DMA map memory not aligned on %d boundary\n",
867 			sc->twa_dv.dv_xname, TWA_ALIGNMENT);
868 
869 		return (1);
870 	}
871 	tc = sc->twa_cmd_pkt_buf = (struct twa_command_packet *)sc->twa_cmds;
872 	sc->twa_cmd_pkt_phys = sc->twa_cmd_map->dm_segs[0].ds_addr;
873 
874 	memset(sc->twa_req_buf, 0, num_reqs * sizeof(struct twa_request));
875 	memset(sc->twa_cmd_pkt_buf, 0,
876 		num_reqs * sizeof(struct twa_command_packet));
877 
878 	sc->sc_twa_request = sc->twa_req_buf;
879 	max_segs = twa_get_maxsegs();
880 	max_xfer = twa_get_maxxfer(max_segs);
881 
882 	for (i = 0; i < num_reqs; i++, tc++) {
883 		tr = &(sc->twa_req_buf[i]);
884 		tr->tr_command = tc;
885 		tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
886 				(i * sizeof(struct twa_command_packet));
887 		tr->tr_request_id = i;
888 		tr->tr_sc = sc;
889 
890 		/*
891 		 * Create a map for data buffers.  The maximum transfer size
892 		 * (max_xfer, computed from max_segs above) should cover the
893 		 * bounce-buffer needs of data buffers, since the largest I/O
894 		 * we support is 128KB.  Supporting larger I/Os would require
895 		 * creating these maps with a correspondingly larger maximum size.
896 		 */
897 		if ((rv = bus_dmamap_create(sc->twa_dma_tag,
898 			max_xfer, max_segs, 1, 0, BUS_DMA_NOWAIT,
899 			&tr->tr_dma_map)) != 0) {
900 				aprint_error("%s: unable to create command "
901 					"DMA map, rv = %d\n",
902 					sc->twa_dv.dv_xname, rv);
903 				return (ENOMEM);
904 		}
905 		/* Insert request into the free queue. */
906 		if (i != 0) {
907 			sc->twa_lookup[i] = tr;
908 			twa_release_request(tr);
909 		} else
910 			tr->tr_flags |= TWA_CMD_AEN;
911 	}
912 	return(0);
913 }
914 
915 
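/*
 * Recompute the number of outstanding commands allowed per unit and
 * notify the attached logical-disk drivers of the new value.
 */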
916 static void
917 twa_recompute_openings(struct twa_softc *sc)
918 {
919 	struct twa_drive *td;
920 	int unit;
921 	int openings;
922 
923 	if (sc->sc_nunits != 0)
924 		openings = ((TWA_Q_LENGTH / 2) / sc->sc_nunits);
925 	else
926 		openings = 0;
927 	if (openings == sc->sc_openings)
928 		return;
929 	sc->sc_openings = openings;
930 
931 #ifdef TWA_DEBUG
932 	printf("%s: %d array%s, %d openings per array\n",
933 	    sc->sc_twa.dv_xname, sc->sc_nunits,
934 	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
935 #endif
936 	for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
937 		td = &sc->sc_units[unit];
938 		if (td->td_dev != NULL)
939 			(*td->td_callbacks->tcb_openings)(td->td_dev,
940 				sc->sc_openings);
941 	}
942 }
943 
944 
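/*
 * Probe all possible units: attach a child device for each unit that
 * answers INQUIRY, and detach the child of any unit that has gone away.
 */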
945 static int
946 twa_request_bus_scan(struct twa_softc *sc)
947 {
948 	struct twa_drive *td;
949 	struct twa_request *tr;
950 	struct twa_attach_args twaa;
951 	int locs[TWACF_NLOCS];
952 	int s, unit;
953 
954 	s = splbio();
955 	for (unit = 0; unit < TWA_MAX_UNITS; unit++) {
956 
957 		if ((tr = twa_get_request(sc, 0)) == NULL) {
958 			splx(s);
959 			return (EIO);
960 		}
961 
962 		tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
963 
964 		tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
965 
966 		if (tr->tr_data == NULL) {
967 			twa_release_request(tr);
968 			splx(s);
969 			return (ENOMEM);
970 		}
971 		td = &sc->sc_units[unit];
972 
973 		if (twa_inquiry(tr, unit) == 0) {
974 
975 			if (td->td_dev == NULL) {
976             			twa_print_inquiry_data(sc,
977 				   ((struct scsipi_inquiry_data *)tr->tr_data));
978 
979 				sc->sc_nunits++;
980 
981 				sc->sc_units[unit].td_size =
982 					twa_read_capacity(tr, unit);
983 
984 				twaa.twaa_unit = unit;
985 
986 				twa_recompute_openings(sc);
987 
988 				locs[TWACF_UNIT] = unit;
989 
990 				sc->sc_units[unit].td_dev =
991 				    	config_found_sm_loc(&sc->twa_dv, "twa", locs,
992 					    &twaa, twa_print, config_stdsubmatch);
993 			}
994 		} else {
995 			if (td->td_dev != NULL) {
996 
997 				sc->sc_nunits--;
998 
999 				(void) config_detach(td->td_dev, DETACH_FORCE);
1000 				td->td_dev = NULL;
1001 				td->td_size = 0;
1002 
1003 				twa_recompute_openings(sc);
1004 			}
1005 		}
1006 		free(tr->tr_data, M_DEVBUF);
1007 
1008 		twa_release_request(tr);
1009 	}
1010 	splx(s);
1011 
1012 	return (0);
1013 }
1014 
1015 
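/*
 * Post a command to the controller's command queue.  If the queue is
 * full, the request is placed on the pending queue and EBUSY is
 * returned; otherwise the request is moved to the busy queue.
 */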
1016 static int
1017 twa_start(struct twa_request *tr)
1018 {
1019 	struct twa_softc	*sc = tr->tr_sc;
1020 	u_int32_t		status_reg;
1021 	int			s;
1022 	int			error;
1023 
1024 	s = splbio();
1025 	/* Check to see if we can post a command. */
1026 	status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1027 	if ((error = twa_check_ctlr_state(sc, status_reg)))
1028 		goto out;
1029 
1030 	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
1031 			if (tr->tr_status != TWA_CMD_PENDING) {
1032 				tr->tr_status = TWA_CMD_PENDING;
1033 				TAILQ_INSERT_TAIL(&tr->tr_sc->twa_pending,
1034 					tr, tr_link);
1035 			}
1036 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1037 					TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
1038 			error = EBUSY;
1039 	} else {
1040 	   	bus_dmamap_sync(sc->twa_dma_tag, sc->twa_cmd_map,
1041 			(caddr_t)tr->tr_command - sc->twa_cmds,
1042 			sizeof(struct twa_command_packet),
1043 			BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1044 
1045 		/* Cmd queue is not full.  Post the command. */
1046 		TWA_WRITE_COMMAND_QUEUE(sc, tr->tr_cmd_phys +
1047 			sizeof(struct twa_command_header));
1048 
1049 		/* Mark the request as currently being processed. */
1050 		tr->tr_status = TWA_CMD_BUSY;
1051 		/* Move the request into the busy queue. */
1052 		TAILQ_INSERT_TAIL(&tr->tr_sc->twa_busy, tr, tr_link);
1053 	}
1054 out:
1055 	splx(s);
1056 	return(error);
1057 }
1058 
1059 
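/*
 * Empty the controller's response queue, discarding the responses.
 */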
1060 static int
1061 twa_drain_response_queue(struct twa_softc *sc)
1062 {
1063 	union twa_response_queue	rq;
1064 	u_int32_t			status_reg;
1065 
1066 	for (;;) {
1067 		status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1068 		if (twa_check_ctlr_state(sc, status_reg))
1069 			return(1);
1070 		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1071 			return(0); /* no more response queue entries */
1072 		rq = (union twa_response_queue)twa_inl(sc, TWA_RESPONSE_QUEUE_OFFSET);
1073 	}
1074 }
1075 
1076 
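/*
 * Abort all requests on the busy queue: internal/ioctl requests are
 * simply freed, while SCSI requests are completed with an error status.
 */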
1077 static void
1078 twa_drain_busy_queue(struct twa_softc *sc)
1079 {
1080 	struct twa_request	*tr;
1081 
1082 	/* Walk the busy queue. */
1083 
1084 	while ((tr = TAILQ_FIRST(&sc->twa_busy)) != NULL) {
1085 		TAILQ_REMOVE(&sc->twa_busy, tr, tr_link);
1086 
1087 		twa_unmap_request(tr);
1088 		if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ||
1089 			(tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_IOCTL)) {
1090 			/* It's an internal/ioctl request.  Simply free it. */
1091 			if (tr->tr_data)
1092 				free(tr->tr_data, M_DEVBUF);
1093 			twa_release_request(tr);
1094 		} else {
1095 			/* It's a SCSI request.  Complete it. */
1096 			tr->tr_command->command.cmd_pkt_9k.status = EIO;
1097 			if (tr->tr_callback)
1098 				tr->tr_callback(tr);
1099 		}
1100 	}
1101 }
1102 
1103 
1104 static int
1105 twa_drain_pending_queue(struct twa_softc *sc)
1106 {
1107 	struct twa_request	*tr;
1108 	int			s, error = 0;
1109 
1110 	/*
1111 	 * Pull requests off the pending queue, and submit them.
1112 	 */
1113 	s = splbio();
1114 	while ((tr = TAILQ_FIRST(&sc->twa_pending)) != NULL) {
1115 		TAILQ_REMOVE(&sc->twa_pending, tr, tr_link);
1116 
1117 		if ((error = twa_start(tr))) {
1118 			if (error == EBUSY) {
1119 				tr->tr_status = TWA_CMD_PENDING;
1120 
1121 				/* queue at the head */
1122 				TAILQ_INSERT_HEAD(&tr->tr_sc->twa_pending,
1123 					tr, tr_link);
1124 				error = 0;
1125 				break;
1126 			} else {
1127 				if (tr->tr_flags & TWA_CMD_SLEEP_ON_REQUEST) {
1128 					tr->tr_error = error;
1129 					tr->tr_callback(tr);
1130 					error = EIO;
1131 				}
1132 			}
1133 		}
1134 	}
1135 	splx(s);
1136 
1137 	return(error);
1138 }
1139 
1140 
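/*
 * Repeatedly fetch AENs from the controller (via REQUEST SENSE) and
 * enqueue them locally, until the controller reports that its AEN queue
 * is empty.
 */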
1141 static int
1142 twa_drain_aen_queue(struct twa_softc *sc)
1143 {
1144 	int				error = 0;
1145 	struct twa_request		*tr;
1146 	struct twa_command_header	*cmd_hdr;
1147 	struct timeval	t1;
1148 	u_int32_t		timeout;
1149 
1150 	for (;;) {
1151 		if ((tr = twa_get_request(sc, 0)) == NULL) {
1152 			error = EIO;
1153 			break;
1154 		}
1155 		tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1156 		tr->tr_callback = NULL;
1157 
1158 		tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
1159 
1160 		if (tr->tr_data == NULL) {
1161 			error = 1;
1162 			goto out;
1163 		}
1164 
1165 		if (twa_request_sense(tr, 0) != 0) {
1166 			error = 1;
1167 			break;
1168 		}
1169 
1170 		timeout = (1000/*ms*/ * 100/*us*/ * TWA_REQUEST_TIMEOUT_PERIOD);
1171 
1172 		microtime(&t1);
1173 
1174 		timeout += t1.tv_usec;
1175 
1176 		do {
1177 			twa_done(tr->tr_sc);
1178 			if (tr->tr_status != TWA_CMD_BUSY)
1179 				break;
1180 			microtime(&t1);
1181 		} while (t1.tv_usec <= timeout);
1182 
1183 		if (tr->tr_status != TWA_CMD_COMPLETE) {
1184 			error = ETIMEDOUT;
1185 			break;
1186 		}
1187 
1188 		if ((error = tr->tr_command->command.cmd_pkt_9k.status))
1189 			break;
1190 
1191 		cmd_hdr = (struct twa_command_header *)(tr->tr_data);
1192 		if ((cmd_hdr->status_block.error) /* aen_code */
1193 				== TWA_AEN_QUEUE_EMPTY)
1194 			break;
1195 		(void)twa_enqueue_aen(sc, cmd_hdr);
1196 
1197 		free(tr->tr_data, M_DEVBUF);
1198 		twa_release_request(tr);
1199 	}
1200 out:
1201 	if (tr) {
1202 		if (tr->tr_data)
1203 			free(tr->tr_data, M_DEVBUF);
1204 
1205 		twa_release_request(tr);
1206 	}
1207 	return(error);
1208 }
1209 
1210 
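/*
 * Process the controller's response queue: unmap each completed request,
 * mark it complete and call its callback, then resubmit any pending
 * requests.
 */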
1211 static int
1212 twa_done(struct twa_softc *sc)
1213 {
1214 	union twa_response_queue	rq;
1215 	struct twa_request		*tr;
1216 	int				s, error = 0;
1217 	u_int32_t			status_reg;
1218 
1219 	for (;;) {
1220 		status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1221 		if ((error = twa_check_ctlr_state(sc, status_reg)))
1222 			break;
1223 		if (status_reg & TWA_STATUS_RESPONSE_QUEUE_EMPTY)
1224 			break;
1225 		/* Response queue is not empty. */
1226 		rq = (union twa_response_queue)twa_inl(sc,
1227 			TWA_RESPONSE_QUEUE_OFFSET);
1228 		tr = sc->sc_twa_request + rq.u.response_id;
1229 
1230 		/* Unmap the command packet, and any associated data buffer. */
1231 		twa_unmap_request(tr);
1232 
1233 		s = splbio();
1234 		tr->tr_status = TWA_CMD_COMPLETE;
1235 		TAILQ_REMOVE(&tr->tr_sc->twa_busy, tr, tr_link);
1236 		splx(s);
1237 
1238 		if (tr->tr_callback)
1239 			tr->tr_callback(tr);
1240 	}
1241 	(void)twa_drain_pending_queue(sc);
1242 
1243 	return(error);
1244 }
1245 
1246 /*
1247  * Function name:	twa_init_ctlr
1248  * Description:		Establishes a logical connection with the controller.
1249  *			If bundled with firmware, determines whether or not
1250  *			to flash firmware, based on arch_id, fw SRL (Spec.
1251  *			Revision Level), branch & build #'s.  Also determines
1252  *			whether or not the driver is compatible with the
1253  *			firmware on the controller, before proceeding to work
1254  *			with it.
1255  *
1256  * Input:		sc	-- ptr to per ctlr structure
1257  * Output:		None
1258  * Return value:	0	-- success
1259  *			non-zero-- failure
1260  */
1261 static int
1262 twa_init_ctlr(struct twa_softc *sc)
1263 {
1264 	u_int16_t	fw_on_ctlr_srl = 0;
1265 	u_int16_t	fw_on_ctlr_arch_id = 0;
1266 	u_int16_t	fw_on_ctlr_branch = 0;
1267 	u_int16_t	fw_on_ctlr_build = 0;
1268 	u_int32_t	init_connect_result = 0;
1269 	int		error = 0;
1270 #if 0
1271 	int8_t		fw_flashed = FALSE;
1272 	int8_t		fw_flash_failed = FALSE;
1273 #endif
1274 
1275 	/* Wait for the controller to become ready. */
1276 	if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY,
1277 					TWA_REQUEST_TIMEOUT_PERIOD)) {
1278 		return(ENXIO);
1279 	}
1280 	/* Drain the response queue. */
1281 	if (twa_drain_response_queue(sc))
1282 		return(1);
1283 
1284 	/* Establish a logical connection with the controller. */
1285 	if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1286 			TWA_EXTENDED_INIT_CONNECT, TWA_CURRENT_FW_SRL,
1287 			TWA_9000_ARCH_ID, TWA_CURRENT_FW_BRANCH,
1288 			TWA_CURRENT_FW_BUILD, &fw_on_ctlr_srl,
1289 			&fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1290 			&fw_on_ctlr_build, &init_connect_result))) {
1291 		return(error);
1292 	}
1293 #if 0
1294 	if ((init_connect_result & TWA_BUNDLED_FW_SAFE_TO_FLASH) &&
1295 		(init_connect_result & TWA_CTLR_FW_RECOMMENDS_FLASH)) {
1296 		/*
1297 		 * The bundled firmware is safe to flash, and the firmware
1298 		 * on the controller recommends a flash.  So, flash!
1299 		 */
1300 		printf("%s: flashing bundled firmware...\n", sc->twa_dv.dv_xname);
1301 
1302 		if ((error = twa_flash_firmware(sc))) {
1303 			fw_flash_failed = TRUE;
1304 
1305 			printf("%s: unable to flash bundled firmware.\n", sc->twa_dv.dv_xname);
1306 		} else {
1307 			printf("%s: successfully flashed bundled firmware.\n",
1308 				 sc->twa_dv.dv_xname);
1309 			fw_flashed = TRUE;
1310 		}
1311 	}
1312 	if (fw_flashed) {
1313 		/* The firmware was flashed.  Have the new image loaded */
1314 		error = twa_hard_reset(sc);
1315 		if (error == 0)
1316 			error = twa_init_ctlr(sc);
1317 		/*
1318 		 * If hard reset of controller failed, we need to return.
1319 		 * Otherwise, the above recursive call to twa_init_ctlr will
1320 		 * have completed the rest of the initialization (starting
1321 		 * from twa_drain_aen_queue below).  Don't do it again.
1322 		 * Just return.
1323 		 */
1324 		return(error);
1325 	} else {
1326 		/*
1327 		 * Either we are not bundled with a firmware image, or
1328 		 * the bundled firmware is not safe to flash,
1329 		 * or flash failed for some reason.  See if we can at
1330 		 * least work with the firmware on the controller in the
1331 		 * current mode.
1332 		 */
1333 		if (init_connect_result & TWA_CTLR_FW_COMPATIBLE) {
1334 			/* Yes, we can.  Make note of the operating mode. */
1335 			sc->working_srl = TWA_CURRENT_FW_SRL;
1336 			sc->working_branch = TWA_CURRENT_FW_BRANCH;
1337 			sc->working_build = TWA_CURRENT_FW_BUILD;
1338 		} else {
1339 			/*
1340 			 * No, we can't.  See if we can at least work with
1341 			 * it in the base mode.  We should never come here
1342 			 * if firmware has just been flashed.
1343 			 */
1344 			printf("%s: Driver/Firmware mismatch.  Negotiating for base level.\n",
1345 					sc->twa_dv.dv_xname);
1346 			if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
1347 					TWA_EXTENDED_INIT_CONNECT, TWA_BASE_FW_SRL,
1348 					TWA_9000_ARCH_ID, TWA_BASE_FW_BRANCH,
1349 					TWA_BASE_FW_BUILD, &fw_on_ctlr_srl,
1350 					&fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
1351 					&fw_on_ctlr_build, &init_connect_result))) {
1352 						printf("%s: can't initialize connection in base mode.\n",
1353 							sc->twa_dv.dv_xname);
1354 				return(error);
1355 			}
1356 			if (!(init_connect_result & TWA_CTLR_FW_COMPATIBLE)) {
1357 				/*
1358 				 * The firmware on the controller is not even
1359 				 * compatible with our base mode.  We cannot
1360 				 * work with it.  Bail...
1361 				 */
1362 				printf("Incompatible firmware on controller\n");
1363 #ifdef TWA_FLASH_FIRMWARE
1364 				if (fw_flash_failed)
1365 					printf("...and could not flash bundled firmware.\n");
1366 				else
1367 					printf("...and bundled firmware not safe to flash.\n");
1368 #endif /* TWA_FLASH_FIRMWARE */
1369 				return(1);
1370 			}
1371 			/* We can work with this firmware, but only in base mode. */
1372 			sc->working_srl = TWA_BASE_FW_SRL;
1373 			sc->working_branch = TWA_BASE_FW_BRANCH;
1374 			sc->working_build = TWA_BASE_FW_BUILD;
1375 			sc->twa_operating_mode = TWA_BASE_MODE;
1376 		}
1377 	}
1378 #endif
1379 	twa_drain_aen_queue(sc);
1380 
1381 	/* Set controller state to initialized. */
1382 	sc->twa_state &= ~TWA_STATE_SHUTDOWN;
1383 	return(0);
1384 }
1385 
1386 
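/*
 * One-time controller setup: allocate request packets and the AEN queue,
 * initialize (and if necessary reset) the controller, scan for units,
 * and finally enable interrupts.
 */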
1387 static int
1388 twa_setup(struct twa_softc *sc)
1389 {
1390 	struct tw_cl_event_packet *aen_queue;
1391 	uint32_t		i = 0;
1392 	int			error = 0;
1393 
1394 	/* Initialize request queues. */
1395 	TAILQ_INIT(&sc->twa_free);
1396 	TAILQ_INIT(&sc->twa_busy);
1397 	TAILQ_INIT(&sc->twa_pending);
1398 
1399 	sc->sc_nunits = 0;
1400 	sc->twa_sc_flags = 0;
1401 
1402 	if (twa_alloc_req_pkts(sc, TWA_Q_LENGTH)) {
1403 
1404 		return(ENOMEM);
1405 	}
1406 
1407 	/* Allocate memory for the AEN queue. */
1408 	if ((aen_queue = malloc(sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH,
1409 					M_DEVBUF, M_WAITOK)) == NULL) {
1410 		/*
1411 		 * Strictly speaking, this failure need not be fatal - we would
1412 		 * only lose AEN support.  But continuing would mean checking
1413 		 * again and again whether AENs can be supported, so just
1414 		 * return an error instead.
1415 		 */
1416 		return (ENOMEM);
1417 	}
1418 	/* Initialize the aen queue. */
1419 	memset(aen_queue, 0, sizeof(struct tw_cl_event_packet) * TWA_Q_LENGTH);
1420 
1421 	for (i = 0; i < TWA_Q_LENGTH; i++)
1422 		sc->twa_aen_queue[i] = &(aen_queue[i]);
1423 
1424 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1425 		TWA_CONTROL_DISABLE_INTERRUPTS);
1426 
1427 	/* Initialize the controller. */
1428 	if ((error = twa_init_ctlr(sc))) {
1429 		/* Soft reset the controller, and try one more time. */
1430 
1431 		printf("%s: controller initialization failed. Retrying initialization\n",
1432 			 sc->twa_dv.dv_xname);
1433 
1434 		if ((error = twa_soft_reset(sc)) == 0)
1435 			error = twa_init_ctlr(sc);
1436 	}
1437 
1438 	twa_describe_controller(sc);
1439 
1440 	error = twa_request_bus_scan(sc);
1441 
1442 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1443 		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
1444 		TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
1445 		TWA_CONTROL_ENABLE_INTERRUPTS);
1446 
1447 	return (error);
1448 }
1449 
1450 void *twa_sdh;
1451 
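/*
 * Autoconfiguration attach routine: map the controller's registers,
 * enable bus mastering, hook up the interrupt and perform the common
 * controller setup.
 */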
1452 static void
1453 twa_attach(struct device *parent, struct device *self, void *aux)
1454 {
1455 	struct pci_attach_args *pa;
1456 	struct twa_softc *sc;
1457 	pci_chipset_tag_t pc;
1458 	pcireg_t csr;
1459 	pci_intr_handle_t ih;
1460 	const char *intrstr;
1461 
1462 	sc = (struct twa_softc *)self;
1463 
1464 	pa = aux;
1465 	pc = pa->pa_pc;
1466 	sc->pc = pa->pa_pc;
1467 	sc->tag = pa->pa_tag;
1468 	sc->twa_dma_tag = pa->pa_dmat;
1469 
1470 	aprint_naive(": RAID controller\n");
1471 	aprint_normal(": 3ware Apache\n");
1472 
1473 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9000) {
1474 		if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
1475 	    	    &sc->twa_bus_iot, &sc->twa_bus_ioh, NULL, NULL)) {
1476 			aprint_error("%s: can't map i/o space\n",
1477 			    sc->twa_dv.dv_xname);
1478 			return;
1479 		}
1480 	} else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_9550) {
1481 		if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
1482 	    	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->twa_bus_iot,
1483 		    &sc->twa_bus_ioh, NULL, NULL)) {
1484 			aprint_error("%s: can't map mem space\n",
1485 			    sc->twa_dv.dv_xname);
1486 			return;
1487 		}
1488 	} else {
1489 		aprint_error("%s: product id 0x%02x not recognized\n",
1490 		    sc->twa_dv.dv_xname, PCI_PRODUCT(pa->pa_id));
1491 		return;
1492 	}
1493 	/* Enable the device. */
1494 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1495 
1496 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1497 	    csr | PCI_COMMAND_MASTER_ENABLE);
1498 
1499 	/* Map and establish the interrupt. */
1500 	if (pci_intr_map(pa, &ih)) {
1501 		aprint_error("%s: can't map interrupt\n", sc->twa_dv.dv_xname);
1502 		return;
1503 	}
1504 	intrstr = pci_intr_string(pc, ih);
1505 
1506 	sc->twa_ih = pci_intr_establish(pc, ih, IPL_BIO, twa_intr, sc);
1507 	if (sc->twa_ih == NULL) {
1508 		aprint_error("%s: can't establish interrupt%s%s\n",
1509 			sc->twa_dv.dv_xname,
1510 			(intrstr) ? " at " : "",
1511 			(intrstr) ? intrstr : "");
1512 		return;
1513 	}
1514 
1515 	if (intrstr != NULL)
1516 		aprint_normal("%s: interrupting at %s\n",
1517 			sc->twa_dv.dv_xname, intrstr);
1518 
1519 	twa_setup(sc);
1520 
1521 	if (twa_sdh == NULL)
1522 		twa_sdh = shutdownhook_establish(twa_shutdown, NULL);
1523 
1524 	return;
1525 }
1526 
1527 
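/*
 * Shutdown hook: detach the attached units, disable interrupts and tell
 * each controller that the system is going down.
 */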
1528 static void
1529 twa_shutdown(void *arg)
1530 {
1531 	extern struct cfdriver twa_cd;
1532 	struct twa_softc *sc;
1533 	int i, rv, unit;
1534 
1535 	for (i = 0; i < twa_cd.cd_ndevs; i++) {
1536 		if ((sc = device_lookup(&twa_cd, i)) == NULL)
1537 			continue;
1538 
1539 		for (unit = 0; unit < TWA_MAX_UNITS; unit++)
1540 			if (sc->sc_units[unit].td_dev != NULL)
1541 				(void) config_detach(sc->sc_units[unit].td_dev,
1542 					DETACH_FORCE | DETACH_QUIET);
1543 
1544 		twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1545 			TWA_CONTROL_DISABLE_INTERRUPTS);
1546 
1547 		/* Let the controller know that we are going down. */
1548 		rv = twa_init_connection(sc, TWA_SHUTDOWN_MESSAGE_CREDITS,
1549 				0, 0, 0, 0, 0,
1550 				NULL, NULL, NULL, NULL, NULL);
1551 	}
1552 }
1553 
1554 
1555 void
1556 twa_register_callbacks(struct twa_softc *sc, int unit,
1557     const struct twa_callbacks *tcb)
1558 {
1559 
1560 	sc->sc_units[unit].td_callbacks = tcb;
1561 }
1562 
1563 
1564 /*
1565  * Print autoconfiguration message for a sub-device
1566  */
1567 static int
1568 twa_print(void *aux, const char *pnp)
1569 {
1570 	struct twa_attach_args *twaa;
1571 
1572 	twaa = aux;
1573 
1574 	if (pnp != NULL)
1575 		aprint_normal("block device at %s\n", pnp);
1576 	aprint_normal(" unit %d\n", twaa->twaa_unit);
1577 	return (UNCONF);
1578 }
1579 
1580 
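/*
 * Fill in a scatter/gather list from the DMA segment descriptors.
 */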
1581 static void
1582 twa_fillin_sgl(struct twa_sg *sgl, bus_dma_segment_t *segs, int nsegments)
1583 {
1584 	int	i;
1585 	for (i = 0; i < nsegments; i++) {
1586 		sgl[i].address = segs[i].ds_addr;
1587 		sgl[i].length = (u_int32_t)(segs[i].ds_len);
1588 	}
1589 }
1590 
1591 
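/*
 * Submit an I/O request to the controller; EBUSY from twa_start means
 * the request was queued as pending and is not treated as an error.
 */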
1592 static int
1593 twa_submit_io(struct twa_request *tr)
1594 {
1595 	int	error;
1596 
1597 	if ((error = twa_start(tr))) {
1598 		if (error == EBUSY)
1599 			error = 0; /* request is in the pending queue */
1600 		else {
1601 			tr->tr_error = error;
1602 		}
1603 	}
1604 	return(error);
1605 }
1606 
1607 
1608 /*
1609  * Function name:	twa_setup_data_dmamap
1610  * Description:		Called once the data buffer has been mapped by
1611  *			bus_dmamap_load.  Updates the cmd pkt (size/sgl_entries
1612  *			fields, as applicable) to reflect the number of sg
1613  *			elements, and submits the I/O.
1614  *
1615  * Input:		arg	-- ptr to request pkt
1616  *			segs	-- ptr to a list of segment descriptors
1617  *			nsegments--# of segments
1618  *			error	-- 0 if no errors encountered before callback,
1619  *				   non-zero if errors were encountered
1620  * Output:		None
1621  * Return value:	0 on success, non-zero on failure
1622  */
1623 static int
1624 twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
1625 					int nsegments, int error)
1626 {
1627 	struct twa_request		*tr = (struct twa_request *)arg;
1628 	struct twa_command_packet	*cmdpkt = tr->tr_command;
1629 	struct twa_command_9k		*cmd9k;
1630 	union twa_command_7k		*cmd7k;
1631 	u_int8_t			sgl_offset;
1632 
1633 	if (error == EFBIG) {
1634 		tr->tr_error = error;
1635 		goto out;
1636 	}
1637 
1638 	if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
1639 		cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1640 		twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
1641 		cmd9k->sgl_entries += nsegments - 1;
1642 	} else {
1643 		/* It's a 7000 command packet. */
1644 		cmd7k = &(cmdpkt->command.cmd_pkt_7k);
1645 		if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
1646 			twa_fillin_sgl((struct twa_sg *)
1647 					(((u_int32_t *)cmd7k) + sgl_offset),
1648 					segs, nsegments);
1649 		/* Modify the size field, based on sg address size. */
1650 		cmd7k->generic.size +=
1651 			((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
1652 	}
1653 
1654 	if (tr->tr_flags & TWA_CMD_DATA_IN)
1655 		bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1656 			tr->tr_length, BUS_DMASYNC_PREREAD);
1657 	if (tr->tr_flags & TWA_CMD_DATA_OUT) {
1658 		/*
1659 		 * If we're using an alignment buffer, and we're
1660 		 * writing data, copy the real data out.
1661 		 */
1662 		if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
1663 			memcpy(tr->tr_data, tr->tr_real_data,
1664 				tr->tr_real_length);
1665 		bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map, 0,
1666 			tr->tr_length, BUS_DMASYNC_PREWRITE);
1667 	}
1668 	error = twa_submit_io(tr);
1669 
1670 out:
1671 	if (error) {
1672 		twa_unmap_request(tr);
1673 		/*
1674 		 * If the caller had been returned EINPROGRESS, and he has
1675 		 * registered a callback for handling completion, the callback
1676 		 * will never get called because we were unable to submit the
1677 		 * request.  So, free up the request right here.
1678 		 */
1679 		if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
1680 			twa_release_request(tr);
1681 	}
1682 	return (error);
1683 }
1684 
1685 
1686 /*
1687  * Function name:	twa_map_request
1688  * Description:		Maps a cmd pkt and data associated with it, into
1689  *			DMA'able memory.
1690  *
1691  * Input:		tr	-- ptr to request pkt
1692  * Output:		None
1693  * Return value:	0	-- success
1694  *			non-zero-- failure
1695  */
1696 int
1697 twa_map_request(struct twa_request *tr)
1698 {
1699 	struct twa_softc	*sc = tr->tr_sc;
1700 	int			 s, rv, error = 0;
1701 
1702 	/* If the command involves data, map that too. */
1703 	if (tr->tr_data != NULL) {
1704 
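		/*
		 * Data buffers handed to the controller must be 512-byte
		 * aligned.  If this buffer is misaligned, bounce the I/O
		 * through a properly aligned scratch buffer.
		 */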
1705 		if (((u_long)tr->tr_data & (511)) != 0) {
1706 			tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
1707 			tr->tr_real_data = tr->tr_data;
1708 			tr->tr_real_length = tr->tr_length;
1709 			s = splvm();
1710 			tr->tr_data = (void *)uvm_km_alloc(kmem_map,
1711 			    tr->tr_length, 512, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
1712 			splx(s);
1713 
1714 			if (tr->tr_data == NULL) {
1715 				tr->tr_data = tr->tr_real_data;
1716 				tr->tr_length = tr->tr_real_length;
1717 				return(ENOMEM);
1718 			}
1719 			if ((tr->tr_flags & TWA_CMD_DATA_IN) != 0)
1720 				memcpy(tr->tr_data, tr->tr_real_data,
1721 					tr->tr_length);
1722 		}
1723 
1724 		/*
1725 		 * Map the data buffer into bus space and build the S/G list.
1726 		 */
1727 		rv = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
1728 			tr->tr_data, tr->tr_length, NULL, BUS_DMA_NOWAIT |
1729 			BUS_DMA_STREAMING | ((tr->tr_flags & TWA_CMD_DATA_OUT) ?
1730 			BUS_DMA_READ : BUS_DMA_WRITE));
1731 
1732 		if (rv != 0) {
1733 			if ((tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) != 0) {
1734 				s = splvm();
1735 				uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1736 				    tr->tr_length, UVM_KMF_WIRED);
1737 				splx(s);
1738 			}
1739 			return (rv);
1740 		}
1741 
1742 		if ((rv = twa_setup_data_dmamap(tr,
1743 				tr->tr_dma_map->dm_segs,
1744 				tr->tr_dma_map->dm_nsegs, error))) {
1745 
1746 			if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
1747 				uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1748 				    tr->tr_length, UVM_KMF_WIRED);
1749 				tr->tr_data = tr->tr_real_data;
1750 				tr->tr_length = tr->tr_real_length;
1751 			}
1752 		} else
1753 			error = tr->tr_error;
1754 
1755 	} else
1756 		if ((rv = twa_submit_io(tr)))
1757 			twa_unmap_request(tr);
1758 
1759 	return (rv);
1760 }
1761 
1762 #if 0
1763 /*
1764  * Function name:	twa_flash_firmware
1765  * Description:		Flashes bundled firmware image onto controller.
1766  *
1767  * Input:		sc	-- ptr to per ctlr structure
1768  * Output:		None
1769  * Return value:	0	-- success
1770  *			non-zero-- failure
1771  */
1772 static int
1773 twa_flash_firmware(struct twa_softc *sc)
1774 {
1775 	struct twa_request			*tr;
1776 	struct twa_command_download_firmware	*cmd;
1777 	uint32_t				count;
1778 	uint32_t				fw_img_chunk_size;
1779 	uint32_t				this_chunk_size = 0;
1780 	uint32_t				remaining_img_size = 0;
1781 	int					s, error = 0;
1782 	int					i;
1783 
1784 	if ((tr = twa_get_request(sc, 0)) == NULL) {
1785 		/* No free request packets available.  Can't proceed. */
1786 		error = EIO;
1787 		goto out;
1788 	}
1789 
1790 	count = (twa_fw_img_size / 65536);
1791 
1792 	count += ((twa_fw_img_size % 65536) != 0) ? 1 : 0;
1793 
1794 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1795 	/* Allocate sufficient memory to hold a chunk of the firmware image. */
1796 	fw_img_chunk_size = ((twa_fw_img_size / count) + 511) & ~511;
1797 
1798 	s = splvm();
1799 	tr->tr_data = (void *)uvm_km_alloc(kmem_map, fw_img_chunk_size, 512,
1800 				UVM_KMF_WIRED);
1801 	splx(s);
1802 
1803 	if (tr->tr_data == NULL) {
1804 		error = ENOMEM;
1805 		goto out;
1806 	}
1807 
1808 	remaining_img_size = twa_fw_img_size;
1809 	cmd = &(tr->tr_command->command.cmd_pkt_7k.download_fw);
1810 
1811 	for (i = 0; i < count; i++) {
1812 		/* Build a cmd pkt for downloading firmware. */
1813 		memset(tr->tr_command, 0, sizeof(struct twa_command_packet));
1814 
1815 		tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1816 
1817 		cmd->opcode = TWA_OP_DOWNLOAD_FIRMWARE;
1818 		cmd->sgl_offset = 2;/* offset in dwords, to the beginning of sg list */
1819 		cmd->size = 2;	/* this field will be updated at data map time */
1820 		cmd->request_id = tr->tr_request_id;
1821 		cmd->unit = 0;
1822 		cmd->status = 0;
1823 		cmd->flags = 0;
1824 		cmd->param = 8;	/* prom image */
1825 
1826 		if (i != (count - 1))
1827 			this_chunk_size = fw_img_chunk_size;
1828 		else	 /* last chunk */
1829 			this_chunk_size = remaining_img_size;
1830 
1831 		remaining_img_size -= this_chunk_size;
1832 
1833 		memset(tr->tr_data, 0, fw_img_chunk_size);
1834 
1835 		memcpy(tr->tr_data, twa_fw_img + (i * fw_img_chunk_size),
1836 			this_chunk_size);
1837 		/*
1838 		 * The next line affects only the last chunk.
1839 		 */
1840 		tr->tr_length = (this_chunk_size + 511) & ~511;
1841 
1842 		tr->tr_flags |= TWA_CMD_DATA_OUT;
1843 
1844 		error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1845 
1846 		if (error) {
1847 			if (error == ETIMEDOUT)
1848 				return(error); /* clean-up done by twa_immediate_request */
1849 			break;
1850 		}
1851 		error = cmd->status;
1852 
1853 		if (i != (count - 1)) {
1854 
1855 			/*
1856 			 * XXX The FreeBSD code doesn't check for success here,
1857 			 * but in practice the error field is observed to be 0.
1858 			 */
1859 			error = tr->tr_command->cmd_hdr.status_block.error;
1860 			if ((error == 0) ||
1861 			    (error == TWA_ERROR_MORE_DATA)) {
1862 				continue;
1863 			} else {
1864 				twa_hard_reset(sc);
1865 				break;
1866 			}
1867 		} else	 /* last chunk */
1868 			if (error) {
1869 				printf("%s: firmware flash request failed. error = 0x%x\n",
1870 					 sc->twa_dv.dv_xname, error);
1871 				twa_hard_reset(sc);
1872 			}
1873 	} /* for */
1874 
1875 	if (tr->tr_data) {
1876 		s = splvm();
1877 		uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
1878 			fw_img_chunk_size, UVM_KMF_WIRED);
1879 		splx(s);
1880 	}
1881 out:
1882 	if (tr)
1883 		twa_release_request(tr);
1884 	return(error);
1885 }
1886 
1887 /*
1888  * Function name:	twa_hard_reset
1889  * Description:		Hard reset the controller.
1890  *
1891  * Input:		sc	-- ptr to per ctlr structure
1892  * Output:		None
1893  * Return value:	0	-- success
1894  *			non-zero-- failure
1895  */
1896 static int
1897 twa_hard_reset(struct twa_softc *sc)
1898 {
1899 	struct twa_request			*tr;
1900 	struct twa_command_reset_firmware	*cmd;
1901 	int					error;
1902 
1903 	if ((tr = twa_get_request(sc, 0)) == NULL)
1904 		return(EIO);
1905 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
1906 	/* Build a cmd pkt for sending down the hard reset command. */
1907 	tr->tr_command->cmd_hdr.header_desc.size_header = 128;
1908 
1909 	cmd = &(tr->tr_command->command.cmd_pkt_7k.reset_fw);
1910 	cmd->opcode = TWA_OP_RESET_FIRMWARE;
1911 	cmd->size = 2;	/* this field will be updated at data map time */
1912 	cmd->request_id = tr->tr_request_id;
1913 	cmd->unit = 0;
1914 	cmd->status = 0;
1915 	cmd->flags = 0;
1916 	cmd->param = 0;	/* don't reload FPGA logic */
1917 
1918 	tr->tr_data = NULL;
1919 	tr->tr_length = 0;
1920 
1921 	error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
1922 	if (error) {
1923 		printf("%s: hard reset request could not "
1924 			"be posted. error = 0x%x\n", sc->twa_dv.dv_xname, error);
1925 		if (error == ETIMEDOUT)
1926 			return(error); /* clean-up done by twa_immediate_request */
1927 		goto out;
1928 	}
1929 	if ((error = cmd->status)) {
1930 		printf("%s: hard reset request failed. error = 0x%x\n",
1931 			sc->twa_dv.dv_xname, error);
1932 	}
1933 
1934 out:
1935 	if (tr)
1936 		twa_release_request(tr);
1937 	return(error);
1938 }
1939 #endif
1940 
1941 /*
1942  * Function name:	twa_intr
1943  * Description:		Interrupt handler.  Determines the kind of interrupt,
1944  *			and calls the appropriate handler.
1945  *
1946  * Input:		sc	-- ptr to per ctlr structure
1947  * Output:		None
1948  * Return value:	1	-- interrupt was meant for us, and was handled
1949  *			0	-- interrupt was not ours
1949  */
1950 
1951 static int
1952 twa_intr(void *arg)
1953 {
1954 	int	caught, rv;
1955 	struct twa_softc *sc;
1956 	u_int32_t	status_reg;
1957 	sc = (struct twa_softc *)arg;
1958 
1959 	caught = 0;
1960 	/* Collect current interrupt status. */
1961 	status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
1962 	if (twa_check_ctlr_state(sc, status_reg)) {
1963 		caught = 1;
1964 		goto bail;
1965 	}
1966 	/* Dispatch based on the kind of interrupt. */
1967 	if (status_reg & TWA_STATUS_HOST_INTERRUPT) {
1968 		twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1969 			TWA_CONTROL_CLEAR_HOST_INTERRUPT);
1970 		caught = 1;
1971 	}
1972 	if ((status_reg & TWA_STATUS_ATTENTION_INTERRUPT) != 0) {
1973 		twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1974 			TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1975 		rv = twa_fetch_aen(sc);
1976 #ifdef DIAGNOSTIC
1977 		if (rv != 0)
1978 			printf("%s: unable to retrieve AEN (%d)\n",
1979 				sc->twa_dv.dv_xname, rv);
1980 #endif
1981 		caught = 1;
1982 	}
1983 	if (status_reg & TWA_STATUS_COMMAND_INTERRUPT) {
1984 		/* Start any requests that might be in the pending queue. */
1985 		twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
1986 			TWA_CONTROL_MASK_COMMAND_INTERRUPT);
1987 		(void)twa_drain_pending_queue(sc);
1988 		caught = 1;
1989 	}
1990 	if (status_reg & TWA_STATUS_RESPONSE_INTERRUPT) {
1991 		twa_done(sc);
1992 		caught = 1;
1993 	}
1994 bail:
1995 	return (caught);
1996 }
1997 
1998 
1999 /*
2000  * Accept an open operation on the control device.
2001  */
2002 static int
2003 twaopen(dev_t dev, int flag, int mode, struct lwp *l)
2004 {
2005 	struct twa_softc *twa;
2006 
2007 	if ((twa = device_lookup(&twa_cd, minor(dev))) == NULL)
2008 		return (ENXIO);
2009 	if ((twa->twa_sc_flags & TWA_STATE_OPEN) != 0)
2010 		return (EBUSY);
2011 
2012 	twa->twa_sc_flags |= TWA_STATE_OPEN;
2013 
2014 	return (0);
2015 }
2016 
2017 
2018 /*
2019  * Accept the last close on the control device.
2020  */
2021 static int
2022 twaclose(dev_t dev, int flag, int mode, struct lwp *l)
2023 {
2024 	struct twa_softc *twa;
2025 
2026 	twa = device_lookup(&twa_cd, minor(dev));
2027 	twa->twa_sc_flags &= ~TWA_STATE_OPEN;
2028 	return (0);
2029 }
2030 
2031 
2032 /*
2033  * Function name:	twaioctl
2034  * Description:		ioctl handler.
2035  *
2036  * Input:		dev	-- device # of the ctlr
2037  *			cmd	-- ioctl cmd
2038  *			data	-- ptr to buffer in kernel memory, which is
2039  *				   a copy of the input buffer in user-space
2040  * Output:		data	-- ptr to buffer in kernel memory, which will
2041  *				   be copied back to the output buffer in user-space
2042  * Return value:	0	-- success
2043  *			non-zero-- failure
2044  */
2045 static int
2046 twaioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2047 {
2048 	struct twa_softc *sc;
2049 	struct twa_ioctl_9k	*user_buf = (struct twa_ioctl_9k *)data;
2050 	struct tw_cl_event_packet event_buf;
2051 	struct twa_request 	*tr = 0;
2052 	int32_t			event_index = 0;
2053 	int32_t			start_index;
2054 	int			s, error = 0;
2055 
2056 	sc = device_lookup(&twa_cd, minor(dev));
2057 
2058 	switch (cmd) {
2059 	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
2060 	{
2061 		struct twa_command_packet	*cmdpkt;
2062 		u_int32_t			data_buf_size_adjusted;
2063 
2064 		/* Get a request packet */
2065 		tr = twa_get_request_wait(sc, 0);
2066 		KASSERT(tr != NULL);
2067 		/*
2068 		 * Make sure that the data buffer sent to firmware is a
2069 		 * 512 byte multiple in size.
2070 		 */
2071 		data_buf_size_adjusted =
2072 			(user_buf->twa_drvr_pkt.buffer_length + 511) & ~511;
2073 
2074 		if ((tr->tr_length = data_buf_size_adjusted)) {
2075 			if ((tr->tr_data = malloc(data_buf_size_adjusted,
2076 			    M_DEVBUF, M_WAITOK)) == NULL) {
2077 				error = ENOMEM;
2078 				goto fw_passthru_done;
2079 			}
2080 			/* Copy the payload. */
2081 			if ((error = copyin((void *) (user_buf->pdata),
2082 				(void *) (tr->tr_data),
2083 				user_buf->twa_drvr_pkt.buffer_length)) != 0) {
2084 					goto fw_passthru_done;
2085 			}
2086 			tr->tr_flags |= TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2087 		}
2088 		tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_IOCTL;
2089 		cmdpkt = tr->tr_command;
2090 
2091 		/* Copy the command packet. */
2092 		memcpy(cmdpkt, &(user_buf->twa_cmd_pkt),
2093 			sizeof(struct twa_command_packet));
2094 		cmdpkt->command.cmd_pkt_7k.generic.request_id =
2095 			tr->tr_request_id;
2096 
2097 		/* Send down the request, and wait for it to complete. */
2098 		if ((error = twa_wait_request(tr, TWA_REQUEST_TIMEOUT_PERIOD))) {
2099 			if (error == ETIMEDOUT)
2100 				break; /* clean-up done by twa_wait_request */
2101 			goto fw_passthru_done;
2102 		}
2103 
2104 		/* Copy the command packet back into user space. */
2105 		memcpy(&user_buf->twa_cmd_pkt, cmdpkt,
2106 			sizeof(struct twa_command_packet));
2107 
2108 		/* If there was a payload, copy it back too. */
2109 		if (tr->tr_length)
2110 			error = copyout(tr->tr_data, user_buf->pdata,
2111 					user_buf->twa_drvr_pkt.buffer_length);
2112 fw_passthru_done:
2113 		/* Free resources. */
2114 		if (tr->tr_data)
2115 			free(tr->tr_data, M_DEVBUF);
2116 
2117 		if (tr)
2118 			twa_release_request(tr);
2119 		break;
2120 	}
2121 
2122 	case TW_OSL_IOCTL_SCAN_BUS:
2123 		twa_request_bus_scan(sc);
2124 		break;
2125 
2126 	case TW_CL_IOCTL_GET_FIRST_EVENT:
2127 		if (sc->twa_aen_queue_wrapped) {
2128 			if (sc->twa_aen_queue_overflow) {
2129 				/*
2130 				 * The aen queue has wrapped, even before some
2131 				 * events have been retrieved.  Let the caller
2132 				 * know that some AENs were missed.
2133 				 */
2134 				user_buf->twa_drvr_pkt.status =
2135 					TWA_ERROR_AEN_OVERFLOW;
2136 				sc->twa_aen_queue_overflow = FALSE;
2137 			} else
2138 				user_buf->twa_drvr_pkt.status = 0;
2139 			event_index = sc->twa_aen_head;
2140 		} else {
2141 			if (sc->twa_aen_head == sc->twa_aen_tail) {
2142 				user_buf->twa_drvr_pkt.status =
2143 					TWA_ERROR_AEN_NO_EVENTS;
2144 				break;
2145 			}
2146 			user_buf->twa_drvr_pkt.status = 0;
2147 			event_index = sc->twa_aen_tail;	/* = 0 */
2148 		}
2149 		if ((error = copyout(sc->twa_aen_queue[event_index],
2150 			user_buf->pdata, sizeof(struct tw_cl_event_packet))) != 0)
2151 			aprint_error("%s: get_first: Could not copyout to "
2152 				"event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2153 		(sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2153 		break;
2154 
2155 
2156 	case TW_CL_IOCTL_GET_LAST_EVENT:
2157 
2158 		if (sc->twa_aen_queue_wrapped) {
2159 			if (sc->twa_aen_queue_overflow) {
2160 				/*
2161 				 * The aen queue has wrapped, even before some
2162 				 * events have been retrieved.  Let the caller
2163 				 * know that some AENs were missed.
2164 				 */
2165 				user_buf->twa_drvr_pkt.status =
2166 					TWA_ERROR_AEN_OVERFLOW;
2167 				sc->twa_aen_queue_overflow = FALSE;
2168 			} else
2169 				user_buf->twa_drvr_pkt.status = 0;
2170 		} else {
2171 			if (sc->twa_aen_head == sc->twa_aen_tail) {
2172 				user_buf->twa_drvr_pkt.status =
2173 					TWA_ERROR_AEN_NO_EVENTS;
2174 				break;
2175 			}
2176 			user_buf->twa_drvr_pkt.status = 0;
2177 		}
2178 		event_index = (sc->twa_aen_head - 1 + TWA_Q_LENGTH) % TWA_Q_LENGTH;
2179 		if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2180 					sizeof(struct tw_cl_event_packet))) != 0)
2181 			aprint_error("%s: get_last: Could not copyout to "
2182 				"event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2183 		(sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2184 		break;
2185 
2186 
2187 	case TW_CL_IOCTL_GET_NEXT_EVENT:
2188 
2189 		user_buf->twa_drvr_pkt.status = 0;
2190 		if (sc->twa_aen_queue_wrapped) {
2191 
2192 			if (sc->twa_aen_queue_overflow) {
2193 				/*
2194 				 * The aen queue has wrapped, even before some
2195 				 * events have been retrieved.  Let the caller
2196 				 * know that some AENs were missed.
2197 				 */
2198 				user_buf->twa_drvr_pkt.status =
2199 					TWA_ERROR_AEN_OVERFLOW;
2200 				sc->twa_aen_queue_overflow = FALSE;
2201 			}
2202 			start_index = sc->twa_aen_head;
2203 		} else {
2204 			if (sc->twa_aen_head == sc->twa_aen_tail) {
2205 				user_buf->twa_drvr_pkt.status =
2206 					TWA_ERROR_AEN_NO_EVENTS;
2207 				break;
2208 			}
2209 			start_index = sc->twa_aen_tail;	/* = 0 */
2210 		}
2211 		if ((error = copyin(user_buf->pdata, &event_buf,
2212 				sizeof(struct tw_cl_event_packet))) != 0)
2213 			break;
2213 
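		/*
		 * Sequence ids are assigned consecutively as events are
		 * queued, so the slot holding the event that follows the
		 * caller-supplied sequence id can be computed directly.
		 */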
2214 		event_index = (start_index + event_buf.sequence_id -
2215 				(sc->twa_aen_queue[start_index])->sequence_id + 1)
2216 				% TWA_Q_LENGTH;
2217 
2218 		if (! ((sc->twa_aen_queue[event_index])->sequence_id >
2219 						event_buf.sequence_id)) {
2220 			if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2221 				sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2222 			user_buf->twa_drvr_pkt.status =
2223 				TWA_ERROR_AEN_NO_EVENTS;
2224 			break;
2225 		}
2226 		if ((error = copyout(sc->twa_aen_queue[event_index], user_buf->pdata,
2227 					sizeof(struct tw_cl_event_packet))) != 0)
2228 			aprint_error("%s: get_next: Could not copyout to "
2229 				"event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2230 		(sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2231 		break;
2232 
2233 
2234 	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
2235 
2236 		user_buf->twa_drvr_pkt.status = 0;
2237 		if (sc->twa_aen_queue_wrapped) {
2238 			if (sc->twa_aen_queue_overflow) {
2239 				/*
2240 				 * The aen queue has wrapped, even before some
2241 				 * events have been retrieved.  Let the caller
2242 				 * know that some AENs were missed.
2243 				 */
2244 				user_buf->twa_drvr_pkt.status =
2245 					TWA_ERROR_AEN_OVERFLOW;
2246 				sc->twa_aen_queue_overflow = FALSE;
2247 			}
2248 			start_index = sc->twa_aen_head;
2249 		} else {
2250 			if (sc->twa_aen_head == sc->twa_aen_tail) {
2251 				user_buf->twa_drvr_pkt.status =
2252 					TWA_ERROR_AEN_NO_EVENTS;
2253 				break;
2254 			}
2255 			start_index = sc->twa_aen_tail;	/* = 0 */
2256 		}
2257 		if ((error = copyin(user_buf->pdata, &event_buf,
2258 				sizeof(struct tw_cl_event_packet))) != 0)
2259 			break;
2260 		event_index = (start_index + event_buf.sequence_id -
2261 			(sc->twa_aen_queue[start_index])->sequence_id - 1) % TWA_Q_LENGTH;
2262 		if (! ((sc->twa_aen_queue[event_index])->sequence_id <
2263 			event_buf.sequence_id)) {
2264 			if (user_buf->twa_drvr_pkt.status == TWA_ERROR_AEN_OVERFLOW)
2265 				sc->twa_aen_queue_overflow = TRUE; /* so we report the overflow next time */
2266 			user_buf->twa_drvr_pkt.status =
2267 				TWA_ERROR_AEN_NO_EVENTS;
2268 			break;
2269 		}
2270 		if ((error = copyout(sc->twa_aen_queue [event_index], user_buf->pdata,
2271  				sizeof(struct tw_cl_event_packet))) != 0)
2272 				aprint_error("%s: get_previous: Could not copyout to "
2273 					"event_buf. error = %x\n", sc->twa_dv.dv_xname, error);
2274 		(sc->twa_aen_queue[event_index])->retrieved = TWA_AEN_RETRIEVED;
2275 		break;
2276 
2277 	case TW_CL_IOCTL_GET_LOCK:
2278 	{
2279 		struct tw_cl_lock_packet	twa_lock;
2280 
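		/*
		 * The ioctl lock is an advisory, time-limited lock that
		 * user-space callers use to serialize their access to the
		 * controller; it is granted if it is free, if the caller
		 * forces it, or if the previous hold has expired.
		 */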
2281 		if ((error = copyin(user_buf->pdata, &twa_lock,
2282 				sizeof(struct tw_cl_lock_packet))) != 0)
2283 			break;
2283 		s = splbio();
2284 		if ((sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) ||
2285 			(twa_lock.force_flag) ||
2286 			(time.tv_sec >= sc->twa_ioctl_lock.timeout)) {
2287 
2288 			sc->twa_ioctl_lock.lock = TWA_LOCK_HELD;
2289 			sc->twa_ioctl_lock.timeout = time.tv_sec +
2290 				(twa_lock.timeout_msec / 1000);
2291 			twa_lock.time_remaining_msec = twa_lock.timeout_msec;
2292 			user_buf->twa_drvr_pkt.status = 0;
2293 		} else {
2294 			twa_lock.time_remaining_msec =
2295 				(sc->twa_ioctl_lock.timeout - time.tv_sec) *
2296 				1000;
2297 			user_buf->twa_drvr_pkt.status =
2298 					TWA_ERROR_IOCTL_LOCK_ALREADY_HELD;
2299 		}
2300 		splx(s);
2301 		copyout(&twa_lock, user_buf->pdata,
2302 				sizeof(struct tw_cl_lock_packet));
2303 		break;
2304 	}
2305 
2306 	case TW_CL_IOCTL_RELEASE_LOCK:
2307 		s = splbio();
2308 		if (sc->twa_ioctl_lock.lock == TWA_LOCK_FREE) {
2309 			user_buf->twa_drvr_pkt.status =
2310 				TWA_ERROR_IOCTL_LOCK_NOT_HELD;
2311 		} else {
2312 			sc->twa_ioctl_lock.lock = TWA_LOCK_FREE;
2313 			user_buf->twa_drvr_pkt.status = 0;
2314 		}
2315 		splx(s);
2316 		break;
2317 
2318 	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
2319 	{
2320 		struct tw_cl_compatibility_packet	comp_pkt;
2321 
2322 		memcpy(comp_pkt.driver_version, TWA_DRIVER_VERSION_STRING,
2323 					sizeof(TWA_DRIVER_VERSION_STRING));
2324 		comp_pkt.working_srl = sc->working_srl;
2325 		comp_pkt.working_branch = sc->working_branch;
2326 		comp_pkt.working_build = sc->working_build;
2327 		user_buf->twa_drvr_pkt.status = 0;
2328 
2329 		/* Copy compatibility information to user space. */
2330 		copyout(&comp_pkt, user_buf->pdata,
2331 				min(sizeof(struct tw_cl_compatibility_packet),
2332 					user_buf->twa_drvr_pkt.buffer_length));
2333 		break;
2334 	}
2335 
2336 	case TWA_IOCTL_GET_UNITNAME:	/* WASABI EXTENSION */
2337 	{
2338 		struct twa_unitname	*tn;
2339 		struct twa_drive	*tdr;
2340 
2341 		tn = (struct twa_unitname *)data;
2342 			/* XXX mutex */
2343 		if (tn->tn_unit < 0 || tn->tn_unit >= TWA_MAX_UNITS)
2344 			return (EINVAL);
2345 		tdr = &sc->sc_units[tn->tn_unit];
2346 		if (tdr->td_dev == NULL)
2347 			tn->tn_name[0] = '\0';
2348 		else
2349 			strlcpy(tn->tn_name, tdr->td_dev->dv_xname,
2350 			    sizeof(tn->tn_name));
2351 		return (0);
2352 	}
2353 
2354 	default:
2355 		/* Unknown opcode. */
2356 		error = ENOTTY;
2357 	}
2358 
2359 	return(error);
2360 }
2361 
2362 
2363 const struct cdevsw twa_cdevsw = {
2364 	twaopen, twaclose, noread, nowrite, twaioctl,
2365 	nostop, notty, nopoll, nommap,
2366 };
2367 
2368 
2369 /*
2370  * Function name:	twa_get_param
2371  * Description:		Get a firmware parameter.
2372  *
2373  * Input:		sc		-- ptr to per ctlr structure
2374  *			table_id	-- parameter table #
2375  *			param_id	-- index of the parameter in the table
2376  *			param_size	-- size of the parameter in bytes
2377  *			callback	-- ptr to function, if any, to be called
2378  *					back on completion; NULL if no callback.
2379  * Output:		param	-- ptr to the structure holding the parameter
2380  * Return value:	0	-- success
2381  *			non-zero-- failure
2382  */
2383 static int
2384 twa_get_param(struct twa_softc *sc, int table_id, int param_id,
2385 		size_t param_size, void (* callback)(struct twa_request *tr),
2386 		struct twa_param_9k **param)
2387 {
2388 	int			rv = 0;
2389 	struct twa_request	*tr;
2390 	union twa_command_7k	*cmd;
2391 
2392 	/* Get a request packet. */
2393 	if ((tr = twa_get_request(sc, 0)) == NULL) {
2394 		rv = EAGAIN;
2395 		goto out;
2396 	}
2397 
2398 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2399 
2400 	/* Allocate memory to read data into. */
2401 	if ((*param = (struct twa_param_9k *)
2402 		malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) {
2403 		rv = ENOMEM;
2404 		goto out;
2405 	}
2406 
2407 	memset(*param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2408 	tr->tr_data = *param;
2409 	tr->tr_length = TWA_SECTOR_SIZE;
2410 	tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2411 
2412 	/* Build the cmd pkt. */
2413 	cmd = &(tr->tr_command->command.cmd_pkt_7k);
2414 
2415 	tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2416 
2417 	cmd->param.opcode = TWA_OP_GET_PARAM;
2418 	cmd->param.sgl_offset = 2;
2419 	cmd->param.size = 2;
2420 	cmd->param.request_id = tr->tr_request_id;
2421 	cmd->param.unit = 0;
2422 	cmd->param.param_count = 1;
2423 
2424 	/* Specify which parameter we need. */
2425 	(*param)->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2426 	(*param)->parameter_id = param_id;
2427 	(*param)->parameter_size_bytes = param_size;
2428 
2429 	/* Submit the command. */
2430 	if (callback == NULL) {
2431 		/* There's no call back; wait till the command completes. */
2432 		rv = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2433 
2434 		if (rv != 0)
2435 			goto out;
2436 
2437 		if ((rv = cmd->param.status) != 0) {
2438 		     /* twa_drain_complete_queue will have done the unmapping */
2439 		     goto out;
2440 		}
2441 		twa_release_request(tr);
2442 		return (rv);
2443 	} else {
2444 		/* There's a call back.  Simply submit the command. */
2445 		tr->tr_callback = callback;
2446 		rv = twa_map_request(tr);
2447 		return (rv);
2448 	}
2449 out:
2450 	if (tr)
2451 		twa_release_request(tr);
2452 	return(rv);
2453 }
2454 
2455 
2456 /*
2457  * Function name:	twa_set_param
2458  * Description:		Set a firmware parameter.
2459  *
2460  * Input:		sc		-- ptr to per ctlr structure
2461  *			table_id	-- parameter table #
2462  *			param_id	-- index of the parameter in the table
2463  *			param_size	-- size of the parameter in bytes
2464  *			callback	-- ptr to function, if any, to be called
2465  *					back on completion; NULL if no callback.
2466  * Output:		None
2467  * Return value:	0	-- success
2468  *			non-zero-- failure
2469  */
2470 static int
2471 twa_set_param(struct twa_softc *sc, int table_id,
2472 			int param_id, int param_size, void *data,
2473 			void (* callback)(struct twa_request *tr))
2474 {
2475 	struct twa_request	*tr;
2476 	union twa_command_7k	*cmd;
2477 	struct twa_param_9k	*param = NULL;
2478 	int			error = ENOMEM;
2479 
2480 	tr = twa_get_request(sc, 0);
2481 	if (tr == NULL)
2482 		return (EAGAIN);
2483 
2484 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2485 
2486 	/* Allocate memory to send data using. */
2487 	if ((param = (struct twa_param_9k *)
2488 			malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
2489 		goto out;
2490 	memset(param, 0, sizeof(struct twa_param_9k) - 1 + param_size);
2491 	tr->tr_data = param;
2492 	tr->tr_length = TWA_SECTOR_SIZE;
2493 	tr->tr_flags = TWA_CMD_DATA_IN | TWA_CMD_DATA_OUT;
2494 
2495 	/* Build the cmd pkt. */
2496 	cmd = &(tr->tr_command->command.cmd_pkt_7k);
2497 
2498 	tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2499 
2500 	cmd->param.opcode = TWA_OP_SET_PARAM;
2501 	cmd->param.sgl_offset = 2;
2502 	cmd->param.size = 2;
2503 	cmd->param.request_id = tr->tr_request_id;
2504 	cmd->param.unit = 0;
2505 	cmd->param.param_count = 1;
2506 
2507 	/* Specify which parameter we want to set. */
2508 	param->table_id = table_id | TWA_9K_PARAM_DESCRIPTOR;
2509 	param->parameter_id = param_id;
2510 	param->parameter_size_bytes = param_size;
2511 	memcpy(param->data, data, param_size);
2512 
2513 	/* Submit the command. */
2514 	if (callback == NULL) {
2515 		/* There's no call back;  wait till the command completes. */
2516 		error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2517 		if (error == ETIMEDOUT)
2518 			return(error); /* clean-up done by twa_immediate_request */
2519 		if (error)
2520 			goto out;
2521 		if ((error = cmd->param.status)) {
2522 			goto out; /* twa_drain_complete_queue will have done the unmapping */
2523 		}
2524 		free(param, M_DEVBUF);
2525 		twa_release_request(tr);
2526 		return(error);
2527 	} else {
2528 		/* There's a call back.  Simply submit the command. */
2529 		tr->tr_callback = callback;
2530 		if ((error = twa_map_request(tr)))
2531 			goto out;
2532 
2533 		return (0);
2534 	}
2535 out:
2536 	if (param)
2537 		free(param, M_DEVBUF);
2538 	if (tr)
2539 		twa_release_request(tr);
2540 	return(error);
2541 }
2542 
2543 
2544 /*
2545  * Function name:	twa_init_connection
2546  * Description:		Send init_connection cmd to firmware
2547  *
2548  * Input:		sc		-- ptr to per ctlr structure
2549  *			message_credits	-- max # of requests that we might send
2550  *					 down simultaneously.  This will be
2551  *					 typically set to 256 at init-time or
2552  *					after a reset, and to 1 at shutdown-time
2553  *			set_features	-- indicates if we intend to use 64-bit
2554  *					sg, also indicates if we want to do a
2555  *					basic or an extended init_connection;
2556  *
2557  * Note: The following input/output parameters are valid, only in case of an
2558  *		extended init_connection:
2559  *
2560  *			current_fw_srl		-- srl of fw we are bundled
2561  *						with, if any; 0 otherwise
2562  *			current_fw_arch_id	-- arch_id of fw we are bundled
2563  *						with, if any; 0 otherwise
2564  *			current_fw_branch	-- branch # of fw we are bundled
2565  *						with, if any; 0 otherwise
2566  *			current_fw_build	-- build # of fw we are bundled
2567  *						with, if any; 0 otherwise
2568  * Output:		fw_on_ctlr_srl		-- srl of fw on ctlr
2569  *			fw_on_ctlr_arch_id	-- arch_id of fw on ctlr
2570  *			fw_on_ctlr_branch	-- branch # of fw on ctlr
2571  *			fw_on_ctlr_build	-- build # of fw on ctlr
2572  *			init_connect_result	-- result bitmap of fw response
2573  * Return value:	0	-- success
2574  *			non-zero-- failure
2575  */
2576 static int
2577 twa_init_connection(struct twa_softc *sc, u_int16_t message_credits,
2578 			u_int32_t set_features, u_int16_t current_fw_srl,
2579 			u_int16_t current_fw_arch_id, u_int16_t current_fw_branch,
2580 			u_int16_t current_fw_build, u_int16_t *fw_on_ctlr_srl,
2581 			u_int16_t *fw_on_ctlr_arch_id, u_int16_t *fw_on_ctlr_branch,
2582 			u_int16_t *fw_on_ctlr_build, u_int32_t *init_connect_result)
2583 {
2584 	struct twa_request		*tr;
2585 	struct twa_command_init_connect	*init_connect;
2586 	int				error = 1;
2587 
2588 	/* Get a request packet. */
2589 	if ((tr = twa_get_request(sc, 0)) == NULL)
2590 		goto out;
2591 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2592 	/* Build the cmd pkt. */
2593 	init_connect = &(tr->tr_command->command.cmd_pkt_7k.init_connect);
2594 
2595 	tr->tr_command->cmd_hdr.header_desc.size_header = 128;
2596 
2597 	init_connect->opcode = TWA_OP_INIT_CONNECTION;
2598 	init_connect->request_id = tr->tr_request_id;
2599 	init_connect->message_credits = message_credits;
2600 	init_connect->features = set_features;
2601 	if (TWA_64BIT_ADDRESSES) {
2602 		printf("64 bit addressing supported for scatter/gather list\n");
2603 		init_connect->features |= TWA_64BIT_SG_ADDRESSES;
2604 	}
2605 	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2606 		/*
2607 		 * Fill in the extra fields needed for
2608 		 * an extended init_connect.
2609 		 */
2610 		init_connect->size = 6;
2611 		init_connect->fw_srl = current_fw_srl;
2612 		init_connect->fw_arch_id = current_fw_arch_id;
2613 		init_connect->fw_branch = current_fw_branch;
2614 	} else
2615 		init_connect->size = 3;
2616 
2617 	/* Submit the command, and wait for it to complete. */
2618 	error = twa_immediate_request(tr, TWA_REQUEST_TIMEOUT_PERIOD);
2619 	if (error == ETIMEDOUT)
2620 		return(error); /* clean-up done by twa_immediate_request */
2621 	if (error)
2622 		goto out;
2623 	if ((error = init_connect->status)) {
2624 		goto out; /* twa_drain_complete_queue will have done the unmapping */
2625 	}
2626 	if (set_features & TWA_EXTENDED_INIT_CONNECT) {
2627 		*fw_on_ctlr_srl = init_connect->fw_srl;
2628 		*fw_on_ctlr_arch_id = init_connect->fw_arch_id;
2629 		*fw_on_ctlr_branch = init_connect->fw_branch;
2630 		*fw_on_ctlr_build = init_connect->fw_build;
2631 		*init_connect_result = init_connect->result;
2632 	}
2633 	twa_release_request(tr);
2634 	return(error);
2635 
2636 out:
2637 	if (tr)
2638 		twa_release_request(tr);
2639 	return(error);
2640 }
2641 
2642 
2643 static int
2644 twa_reset(struct twa_softc *sc)
2645 {
2646 	int	s;
2647 	int	error = 0;
2648 
2649 	/*
2650 	 * Disable interrupts from the controller, and mask any
2651 	 * accidental entry into our interrupt handler.
2652 	 */
2653 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2654 		TWA_CONTROL_DISABLE_INTERRUPTS);
2655 
2656 	s = splbio();
2657 
2658 	/* Soft reset the controller. */
2659 	if ((error = twa_soft_reset(sc)))
2660 		goto out;
2661 
2662 	/* Re-establish logical connection with the controller. */
2663 	if ((error = twa_init_connection(sc, TWA_INIT_MESSAGE_CREDITS,
2664 					0, 0, 0, 0, 0,
2665 					NULL, NULL, NULL, NULL, NULL))) {
2666 		goto out;
2667 	}
2668 	/*
2669 	 * Complete all requests in the complete queue; error back all requests
2670 	 * in the busy queue.  Any internal requests will be simply freed.
2671 	 * Re-submit any requests in the pending queue.
2672 	 */
2673 	twa_drain_busy_queue(sc);
2674 
2675 out:
2676 	splx(s);
2677 	/*
2678 	 * Enable interrupts, and also clear attention and response interrupts.
2679 	 */
2680 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2681 		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2682 		TWA_CONTROL_UNMASK_RESPONSE_INTERRUPT |
2683 		TWA_CONTROL_ENABLE_INTERRUPTS);
2684 	return(error);
2685 }
2686 
2687 
2688 static int
2689 twa_soft_reset(struct twa_softc *sc)
2690 {
2691 	u_int32_t	status_reg;
2692 
2693 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2694 			TWA_CONTROL_ISSUE_SOFT_RESET |
2695 			TWA_CONTROL_CLEAR_HOST_INTERRUPT |
2696 			TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT |
2697 			TWA_CONTROL_MASK_COMMAND_INTERRUPT |
2698 			TWA_CONTROL_MASK_RESPONSE_INTERRUPT |
2699 			TWA_CONTROL_DISABLE_INTERRUPTS);
2700 
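	/*
	 * The firmware signals completion of the soft reset by setting
	 * the microcontroller-ready and attention-interrupt status bits;
	 * allow up to 30 seconds for that to happen.
	 */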
2701 	if (twa_wait_status(sc, TWA_STATUS_MICROCONTROLLER_READY |
2702 				TWA_STATUS_ATTENTION_INTERRUPT, 30)) {
2703 		aprint_error("%s: no attention interrupt after reset.\n",
2704 			sc->twa_dv.dv_xname);
2705 		return(1);
2706 	}
2707 	twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
2708 		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
2709 
2710 	if (twa_drain_response_queue(sc)) {
2711 		aprint_error("%s: cannot drain response queue.\n",sc->twa_dv.dv_xname);
2712 		return(1);
2713 	}
2714 	if (twa_drain_aen_queue(sc)) {
2715 		aprint_error("%s: cannot drain AEN queue.\n", sc->twa_dv.dv_xname);
2716 		return(1);
2717 	}
2718 	if (twa_find_aen(sc, TWA_AEN_SOFT_RESET)) {
2719 		aprint_error("%s: reset not reported by controller.\n",
2720 			 sc->twa_dv.dv_xname);
2721 		return(1);
2722 	}
2723 	status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2724 	if (TWA_STATUS_ERRORS(status_reg) ||
2725 				twa_check_ctlr_state(sc, status_reg)) {
2726 		aprint_error("%s: controller errors detected.\n", sc->twa_dv.dv_xname);
2727 		return(1);
2728 	}
2729 	return(0);
2730 }
2731 
2732 
2733 static int
2734 twa_wait_status(struct twa_softc *sc, u_int32_t status, u_int32_t timeout)
2735 {
2736 	struct timeval		t1;
2737 	time_t		end_time;
2738 	u_int32_t	status_reg;
2739 
2740 	/* 'timeout' is in seconds; poll the status register every 100ms. */
2741 	microtime(&t1);
2742 
2743 	end_time = t1.tv_sec + timeout;
2744 
2745 	do {
2746 		status_reg = twa_inl(sc, TWA_STATUS_REGISTER_OFFSET);
2747 		if ((status_reg & status) == status)/* got the required bit(s)? */
2748 			return(0);
2749 		DELAY(100000);
2750 		microtime(&t1);
2751 	} while (t1.tv_sec <= end_time);
2753 
2754 	return(1);
2755 }
2756 
2757 
2758 static int
2759 twa_fetch_aen(struct twa_softc *sc)
2760 {
2761 	struct twa_request	*tr;
2762 	int			s, error = 0;
2763 
2764 	s = splbio();
2765 
2766 	if ((tr = twa_get_request(sc, TWA_CMD_AEN)) == NULL) {
2767 		splx(s);
2768 		return(EIO);
2769 	}
2768 	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_INTERNAL;
2769 	tr->tr_callback = twa_aen_callback;
2770 	tr->tr_data = malloc(TWA_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
2771 	if (twa_request_sense(tr, 0) != 0) {
2772 		if (tr->tr_data)
2773 			free(tr->tr_data, M_DEVBUF);
2774 		twa_release_request(tr);
2775 		error = 1;
2776 	}
2777 	splx(s);
2778 
2779 	return(error);
2780 }
2781 
2782 
2783 
2784 /*
2785  * Function name:	twa_aen_callback
2786  * Description:		Callback for requests to fetch AEN's.
2787  *
2788  * Input:		tr	-- ptr to completed request pkt
2789  * Output:		None
2790  * Return value:	None
2791  */
2792 static void
2793 twa_aen_callback(struct twa_request *tr)
2794 {
2795 	int i;
2796 	int fetch_more_aens = 0;
2797 	struct twa_softc		*sc = tr->tr_sc;
2798 	struct twa_command_header	*cmd_hdr =
2799 		(struct twa_command_header *)(tr->tr_data);
2800 	struct twa_command_9k		*cmd =
2801 		&(tr->tr_command->command.cmd_pkt_9k);
2802 
2803 	if (! cmd->status) {
2804 		if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) &&
2805 			(cmd->cdb[0] == 0x3 /* REQUEST_SENSE */))
2806 			if (twa_enqueue_aen(sc, cmd_hdr)
2807 				!= TWA_AEN_QUEUE_EMPTY)
2808 				fetch_more_aens = 1;
2809 	} else {
2810 		cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2811 		for (i = 0; i < 18; i++)
2812 			printf("%x\t", tr->tr_command->cmd_hdr.sense_data[i]);
2813 
2814 		printf("\n");	/* print a new line */
2815 
2816 		for (i = 0; i < 128; i++)
2817 			printf("%x\t", ((int8_t *)(tr->tr_data))[i]);
2818 	}
2819 	if (tr->tr_data)
2820 		free(tr->tr_data, M_DEVBUF);
2821 	twa_release_request(tr);
2822 
2823 	if (fetch_more_aens)
2824 		twa_fetch_aen(sc);
2825 }
2826 
2827 
2828 /*
2829  * Function name:	twa_enqueue_aen
2830  * Description:		Queues AEN's to be supplied to user-space tools on request.
2831  *
2832  * Input:		sc	-- ptr to per ctlr structure
2833  *			cmd_hdr	-- ptr to hdr of fw cmd pkt, from where the AEN
2834  *				   details can be retrieved.
2835  * Output:		None
2836  * Return value:	The AEN code that was handled
2837  */
2838 static uint16_t
2839 twa_enqueue_aen(struct twa_softc *sc, struct twa_command_header *cmd_hdr)
2840 {
2841 	int			rv, s;
2842 	struct tw_cl_event_packet *event;
2843 	uint16_t		aen_code;
2844 	unsigned long		sync_time;
2845 
2846 	s = splbio();
2847 	aen_code = cmd_hdr->status_block.error;
2848 
2849 	switch (aen_code) {
2850 	case TWA_AEN_SYNC_TIME_WITH_HOST:
2851 
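		/*
		 * The controller keeps its scheduler time as seconds since
		 * the most recent Sunday midnight.  The Unix epoch began on
		 * a Thursday, so shift by 3 days before taking the value
		 * modulo one week (604800 seconds).
		 */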
2852 		sync_time = (time.tv_sec - (3 * 86400)) % 604800;
2853 		rv = twa_set_param(sc, TWA_PARAM_TIME_TABLE,
2854 				TWA_PARAM_TIME_SchedulerTime, 4,
2855 				&sync_time, twa_aen_callback);
2856 #ifdef DIAGNOSTIC
2857 		if (rv != 0)
2858 			printf("%s: unable to sync time with ctlr\n",
2859 				sc->twa_dv.dv_xname);
2860 #endif
2861 		break;
2862 
2863 	case TWA_AEN_QUEUE_EMPTY:
2864 		break;
2865 
2866 	default:
2867 		/* Queue the event. */
2868 		event = sc->twa_aen_queue[sc->twa_aen_head];
2869 		if (event->retrieved == TWA_AEN_NOT_RETRIEVED)
2870 			sc->twa_aen_queue_overflow = TRUE;
2871 		event->severity =
2872 			cmd_hdr->status_block.substatus_block.severity;
2873 		event->time_stamp_sec = time.tv_sec;
2874 		event->aen_code = aen_code;
2875 		event->retrieved = TWA_AEN_NOT_RETRIEVED;
2876 		event->sequence_id = ++(sc->twa_current_sequence_id);
2877 		cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
2878 		event->parameter_len = strlen(cmd_hdr->err_specific_desc);
2879 		memcpy(event->parameter_data, cmd_hdr->err_specific_desc,
2880 			event->parameter_len);
2881 
2882 		if (event->severity < TWA_AEN_SEVERITY_DEBUG) {
2883 			printf("%s: AEN 0x%04X: %s: %s: %s\n",
2884 				sc->twa_dv.dv_xname,
2885 				aen_code,
2886 				twa_aen_severity_table[event->severity],
2887 				twa_find_msg_string(twa_aen_table, aen_code),
2888 				event->parameter_data);
2889 		}
2890 
2891 		if ((sc->twa_aen_head + 1) == TWA_Q_LENGTH)
2892 			sc->twa_aen_queue_wrapped = TRUE;
2893 		sc->twa_aen_head = (sc->twa_aen_head + 1) % TWA_Q_LENGTH;
2894 		break;
2895 	} /* switch */
2896 	splx(s);
2897 
2898 	return (aen_code);
2899 }
2900 
2901 
2902 
2903 /*
2904  * Function name:	twa_find_aen
2905  * Description:		Reports whether a given AEN ever occurred.
2906  *
2907  * Input:		sc	-- ptr to per ctlr structure
2908  *			aen_code-- AEN to look for
2909  * Output:		None
2910  * Return value:	0	-- AEN found in the queue
2911  *			non-zero-- AEN not found
2912  */
2913 static int
2914 twa_find_aen(struct twa_softc *sc, u_int16_t aen_code)
2915 {
2916 	u_int32_t	last_index;
2917 	int		s;
2918 	int		i;
2919 
2920 	s = splbio();
2921 
2922 	if (sc->twa_aen_queue_wrapped)
2923 		last_index = sc->twa_aen_head;
2924 	else
2925 		last_index = 0;
2926 
2927 	i = sc->twa_aen_head;
2928 	do {
2929 		i = (i + TWA_Q_LENGTH - 1) % TWA_Q_LENGTH;
2930 		if ((sc->twa_aen_queue[i])->aen_code == aen_code) {
2931 			splx(s);
2932 			return(0);
2933 		}
2934 	} while (i != last_index);
2935 
2936 	splx(s);
2937 	return(1);
2938 }
2939 
2940 static inline void
2941 twa_request_init(struct twa_request *tr, int flags)
2942 {
2943 	tr->tr_data = NULL;
2944 	tr->tr_real_data = NULL;
2945 	tr->tr_length = 0;
2946 	tr->tr_real_length = 0;
2947 	tr->tr_status = TWA_CMD_SETUP;/* command is in setup phase */
2948 	tr->tr_flags = flags;
2949 	tr->tr_error = 0;
2950 	tr->tr_callback = NULL;
2951 	tr->tr_cmd_pkt_type = 0;
2952 
2953 	/*
2954 	 * Look at the status field in the command packet to see how
2955 	 * it completed the last time it was used, and zero out only
2956 	 * the portions that might have changed.  Note that we don't
2957 	 * care to zero out the sglist.
2958 	 */
2959 	if (tr->tr_command->command.cmd_pkt_9k.status)
2960 		memset(tr->tr_command, 0,
2961 			sizeof(struct twa_command_header) + 28);
2962 	else
2963 		memset(&(tr->tr_command->command), 0, 28);
2964 }
2965 
2966 struct twa_request *
2967 twa_get_request_wait(struct twa_softc *sc, int flags)
2968 {
2969 	struct twa_request *tr;
2970 	int s;
2971 
2972 	KASSERT((flags & TWA_CMD_AEN) == 0);
2973 
2974 	s = splbio();
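	/*
	 * Sleep until a request is returned to the free list; the one
	 * second timeout just makes us re-check periodically.
	 */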
2975 	while ((tr = TAILQ_FIRST(&sc->twa_free)) == NULL) {
2976 		sc->twa_sc_flags |= TWA_STATE_REQUEST_WAIT;
2977 		(void) tsleep(&sc->twa_free, PRIBIO, "twaccb", hz);
2978 	}
2979 	TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
2980 
2981 	splx(s);
2982 
2983 	twa_request_init(tr, flags);
2984 
2985 	return(tr);
2986 }
2987 
2988 
2989 struct twa_request *
2990 twa_get_request(struct twa_softc *sc, int flags)
2991 {
2992 	int s;
2993 	struct twa_request *tr;
2994 
2995 	/* Get a free request packet. */
2996 	s = splbio();
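	/*
	 * AEN retrieval uses the dedicated, pre-allocated sc_twa_request,
	 * so an AEN can still be fetched when the free list is empty.
	 */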
2997 	if (__predict_false((flags & TWA_CMD_AEN) != 0)) {
2998 
2999 		if ((sc->sc_twa_request->tr_flags & TWA_CMD_AEN_BUSY) == 0) {
3000 			tr = sc->sc_twa_request;
3001 			flags |= TWA_CMD_AEN_BUSY;
3002 		} else {
3003 			splx(s);
3004 			return (NULL);
3005 		}
3006 	} else {
3007 		if (__predict_false((tr =
3008 				TAILQ_FIRST(&sc->twa_free)) == NULL)) {
3009 			splx(s);
3010 			return (NULL);
3011 		}
3012 		TAILQ_REMOVE(&sc->twa_free, tr, tr_link);
3013 	}
3014 	splx(s);
3015 
3016 	twa_request_init(tr, flags);
3017 
3018 	return(tr);
3019 }
3020 
3021 
3022 /*
3023  * Print some information about the controller
3024  */
3025 static void
3026 twa_describe_controller(struct twa_softc *sc)
3027 {
3028 	struct twa_param_9k	*p[10];
3029 	int			i, rv = 0;
3030 	uint32_t		dsize;
3031 	uint8_t			ports;
3032 
3033 	memset(p, 0, sizeof(p));
3034 
3035 	/* Get the port count. */
3036 	rv |= twa_get_param(sc, TWA_PARAM_CONTROLLER,
3037 		TWA_PARAM_CONTROLLER_PortCount, 1, NULL, &p[0]);
3038 
3039 	/* get version strings */
3040 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_FW,
3041 		16, NULL, &p[1]);
3042 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_BIOS,
3043 		16, NULL, &p[2]);
3044 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_Mon,
3045 		16, NULL, &p[3]);
3046 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCBA,
3047 		8, NULL, &p[4]);
3048 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_ATA,
3049 		8, NULL, &p[5]);
3050 	rv |= twa_get_param(sc, TWA_PARAM_VERSION, TWA_PARAM_VERSION_PCI,
3051 		8, NULL, &p[6]);
3052 	rv |= twa_get_param(sc, TWA_PARAM_DRIVESUMMARY, TWA_PARAM_DRIVESTATUS,
3053 		16, NULL, &p[7]);
3054 
3055 	if (rv) {
3056 		/* some error occurred */
3057 		aprint_error("%s: failed to fetch version information\n",
3058 			sc->twa_dv.dv_xname);
3059 		goto bail;
3060 	}
3061 
3062 	ports = *(u_int8_t *)(p[0]->data);
3063 
3064 	aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
3065 		sc->twa_dv.dv_xname, ports,
3066 		p[1]->data, p[2]->data);
3067 
3068 	aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
3069 		sc->twa_dv.dv_xname,
3070 		p[3]->data, p[4]->data,
3071 		p[5]->data, p[6]->data);
3072 
3073 	for (i = 0; i < ports; i++) {
3074 
3075 		if ((*((char *)(p[7]->data + i)) & TWA_DRIVE_DETECTED) == 0)
3076 			continue;
3077 
3078 		rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3079 			TWA_PARAM_DRIVEMODELINDEX,
3080 			TWA_PARAM_DRIVEMODEL_LENGTH, NULL, &p[8]);
3081 
3082 		if (rv != 0) {
3083 			aprint_error("%s: unable to get drive model for port"
3084 				" %d\n", sc->twa_dv.dv_xname, i);
3085 			continue;
3086 		}
3087 
3088 		rv = twa_get_param(sc, TWA_PARAM_DRIVE_TABLE,
3089 			TWA_PARAM_DRIVESIZEINDEX,
3090 			TWA_PARAM_DRIVESIZE_LENGTH, NULL, &p[9]);
3091 
3092 		if (rv != 0) {
3093 			aprint_error("%s: unable to get drive size"
3094 				" for port %d\n", sc->twa_dv.dv_xname,
3095 					i);
3096 			free(p[8], M_DEVBUF);
3097 			continue;
3098 		}
3099 
3100 		dsize = *(uint32_t *)(p[9]->data);
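		/* The size is reported in 512-byte sectors (2048 per MB). */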
3101 
3102 		aprint_verbose("%s: port %d: %.40s %d MB\n",
3103 		    sc->twa_dv.dv_xname, i, p[8]->data, dsize / 2048);
3104 
3105 		if (p[8])
3106 			free(p[8], M_DEVBUF);
3107 		if (p[9])
3108 			free(p[9], M_DEVBUF);
3109 	}
3110 bail:
3111 	if (p[0])
3112 		free(p[0], M_DEVBUF);
3113 	if (p[1])
3114 		free(p[1], M_DEVBUF);
3115 	if (p[2])
3116 		free(p[2], M_DEVBUF);
3117 	if (p[3])
3118 		free(p[3], M_DEVBUF);
3119 	if (p[4])
3120 		free(p[4], M_DEVBUF);
3121 	if (p[5])
3122 		free(p[5], M_DEVBUF);
3123 	if (p[6])
3124 		free(p[6], M_DEVBUF);
3125 }
3126 
3127 
3128 
3129 /*
3130  * Function name:	twa_check_ctlr_state
3131  * Description:		Makes sure that the fw status register reports a
3132  *			proper status.
3133  *
3134  * Input:		sc		-- ptr to per ctlr structure
3135  *			status_reg	-- value in the status register
3136  * Output:		None
3137  * Return value:	0	-- no errors
3138  *			non-zero-- errors
3139  */
3140 static int
3141 twa_check_ctlr_state(struct twa_softc *sc, u_int32_t status_reg)
3142 {
3143 	int		result = 0;
3144 	struct timeval	t1;
3145 	static time_t	last_warning[2] = {0, 0};
3146 
3147 	/* Check if the 'micro-controller ready' bit is not set. */
3148 	if ((status_reg & TWA_STATUS_EXPECTED_BITS) !=
3149 				TWA_STATUS_EXPECTED_BITS) {
3150 
3151 		microtime(&t1);
3152 
3153 		last_warning[0] += (5 * 1000 * 100);
3154 
3155 		if (t1.tv_usec > last_warning[0]) {
3156 			microtime(&t1);
3157 			last_warning[0] = t1.tv_usec;
3158 		}
3159 		result = 1;
3160 	}
3161 
3162 	/* Check if any error bits are set. */
3163 	if ((status_reg & TWA_STATUS_UNEXPECTED_BITS) != 0) {
3164 
3165 		microtime(&t1);
3166 		last_warning[1] += (5 * 1000 * 100);
3167 		if (t1.tv_usec > last_warning[1]) {
3168 		     	microtime(&t1);
3169 			last_warning[1] = t1.tv_usec;
3170 		}
3171 		if (status_reg & TWA_STATUS_PCI_PARITY_ERROR_INTERRUPT) {
3172 			aprint_error("%s: clearing PCI parity error "
3173 				"re-seat/move/replace card.\n",
3174 				 sc->twa_dv.dv_xname);
3175 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3176 				TWA_CONTROL_CLEAR_PARITY_ERROR);
3177 			pci_conf_write(sc->pc, sc->tag,
3178 				PCI_COMMAND_STATUS_REG,
3179 				TWA_PCI_CONFIG_CLEAR_PARITY_ERROR);
3180 			result = 1;
3181 		}
3182 		if (status_reg & TWA_STATUS_PCI_ABORT_INTERRUPT) {
3183 			aprint_error("%s: clearing PCI abort\n",
3184 				sc->twa_dv.dv_xname);
3185 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3186 				TWA_CONTROL_CLEAR_PCI_ABORT);
3187 			pci_conf_write(sc->pc, sc->tag,
3188 				PCI_COMMAND_STATUS_REG,
3189 				TWA_PCI_CONFIG_CLEAR_PCI_ABORT);
3190 			result = 1;
3191 		}
3192 		if (status_reg & TWA_STATUS_QUEUE_ERROR_INTERRUPT) {
3193 			aprint_error("%s: clearing controller queue error\n",
3194 				sc->twa_dv.dv_xname);
3195 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3196 				TWA_CONTROL_CLEAR_PCI_ABORT);
3197 			result = 1;
3198 		}
3199 		if (status_reg & TWA_STATUS_SBUF_WRITE_ERROR) {
3200 			aprint_error("%s: clearing SBUF write error\n",
3201 				sc->twa_dv.dv_xname);
3202 			twa_outl(sc, TWA_CONTROL_REGISTER_OFFSET,
3203 				TWA_CONTROL_CLEAR_SBUF_WRITE_ERROR);
3204 			result = 1;
3205 		}
3206 		if (status_reg & TWA_STATUS_MICROCONTROLLER_ERROR) {
3207 			aprint_error("%s: micro-controller error\n",
3208 				sc->twa_dv.dv_xname);
3209 			result = 1;
3210 		}
3211 	}
3212 	return(result);
3213 }
3214 
3215 
3216