xref: /dflybsd-src/sys/dev/raid/asr/asr.c (revision 7bf41faaed5442d0a7f34840646b4a385034515c)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/cdefs.h>
115 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
116 #include <sys/kernel.h>
117 #include <sys/module.h>
118 #include <sys/systm.h>
119 #include <sys/malloc.h>
120 #include <sys/conf.h>
121 #include <sys/priv.h>
122 #include <sys/proc.h>
123 #include <sys/bus.h>
124 #include <sys/rman.h>
125 #include <sys/stat.h>
126 #include <sys/device.h>
127 #include <sys/thread2.h>
128 #include <sys/bus_dma.h>
129 
130 #include <bus/cam/cam.h>
131 #include <bus/cam/cam_ccb.h>
132 #include <bus/cam/cam_sim.h>
133 #include <bus/cam/cam_xpt_sim.h>
134 
135 #include <bus/cam/scsi/scsi_all.h>
136 #include <bus/cam/scsi/scsi_message.h>
137 
138 #include <vm/vm.h>
139 #include <vm/pmap.h>
140 
141 #if defined(__i386__)
142 #include "opt_asr.h"
143 #include <machine/cputypes.h>
144 
145 #if defined(ASR_COMPAT)
146 #define ASR_IOCTL_COMPAT
147 #endif /* ASR_COMPAT */
148 #endif
149 #include <machine/vmparam.h>
150 
151 #include <bus/pci/pcivar.h>
152 #include <bus/pci/pcireg.h>
153 
154 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
155 #define	KVTOPHYS(x) vtophys(x)
156 #include	<dev/raid/asr/dptalign.h>
157 #include	<dev/raid/asr/i2oexec.h>
158 #include	<dev/raid/asr/i2obscsi.h>
159 #include	<dev/raid/asr/i2odpt.h>
160 #include	<dev/raid/asr/i2oadptr.h>
161 
162 #include	<dev/raid/asr/sys_info.h>
163 
164 #define	ASR_VERSION	1
165 #define	ASR_REVISION	'1'
166 #define	ASR_SUBREVISION '0'
167 #define	ASR_MONTH	5
168 #define	ASR_DAY		5
169 #define	ASR_YEAR	(2004 - 1980)
170 
171 /*
172  *	Debug macros to reduce the unsightly ifdefs
173  */
174 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
175 static __inline void
176 debug_asr_message(PI2O_MESSAGE_FRAME message)
177 {
178 	u_int32_t * pointer = (u_int32_t *)message;
179 	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
180 	u_int32_t   counter = 0;
181 
182 	while (length--) {
183 		kprintf("%08lx%c", (u_long)*(pointer++),
184 		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
185 	}
186 }
187 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
188 
189 #ifdef DEBUG_ASR
  /* Breaks on non-STDC-based compilers :-( */
191 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
192 #define debug_asr_dump_message(message)	debug_asr_message(message)
193 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
194 #else /* DEBUG_ASR */
195 #define debug_asr_printf(fmt,args...)
196 #define debug_asr_dump_message(message)
197 #define debug_asr_print_path(ccb)
198 #endif /* DEBUG_ASR */
199 
200 /*
201  *	If DEBUG_ASR_CMD is defined:
202  *		0 - Display incoming SCSI commands
203  *		1 - add in a quick character before queueing.
204  *		2 - add in outgoing message frames.
205  */
206 #if (defined(DEBUG_ASR_CMD))
207 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
208 static __inline void
209 debug_asr_dump_ccb(union ccb *ccb)
210 {
211 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
212 	int		len = ccb->csio.cdb_len;
213 
214 	while (len) {
215 		debug_asr_cmd_printf (" %02x", *(cp++));
216 		--len;
217 	}
218 }
219 #if (DEBUG_ASR_CMD > 0)
220 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
221 #else
222 #define debug_asr_cmd1_printf(fmt,args...)
223 #endif
224 #if (DEBUG_ASR_CMD > 1)
225 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
226 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
227 #else
228 #define debug_asr_cmd2_printf(fmt,args...)
229 #define debug_asr_cmd2_dump_message(message)
230 #endif
231 #else /* DEBUG_ASR_CMD */
232 #define debug_asr_cmd_printf(fmt,args...)
233 #define debug_asr_dump_ccb(ccb)
234 #define debug_asr_cmd1_printf(fmt,args...)
235 #define debug_asr_cmd2_printf(fmt,args...)
236 #define debug_asr_cmd2_dump_message(message)
237 #endif /* DEBUG_ASR_CMD */
238 
239 #if (defined(DEBUG_ASR_USR_CMD))
240 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
241 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
242 #else /* DEBUG_ASR_USR_CMD */
243 #define debug_usr_cmd_printf(fmt,args...)
244 #define debug_usr_cmd_dump_message(message)
245 #endif /* DEBUG_ASR_USR_CMD */
246 
247 #ifdef ASR_IOCTL_COMPAT
248 #define	dsDescription_size 46	/* Snug as a bug in a rug */
249 #endif /* ASR_IOCTL_COMPAT */
250 
251 #include "dev/raid/asr/dptsig.h"
252 
/*
 * DPT driver signature block: identifies the driver (version, date,
 * supported processors/OS) to DPT/Adaptec management tools.
 * asr_attach patches the OS-version digits in the description string.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
263 
264 /* Configuration Definitions */
265 
266 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
267 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
268 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
269 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
270 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
271 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
272 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
273 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
274 				/* Also serves as the minimum map for	 */
275 				/* the 2005S zero channel RAID product	 */
276 
277 /* I2O register set */
278 #define	I2O_REG_STATUS		0x30
279 #define	I2O_REG_MASK		0x34
280 #define	I2O_REG_TOFIFO		0x40
281 #define	I2O_REG_FROMFIFO	0x44
282 
283 #define	Mask_InterruptsDisabled	0x08
284 
285 /*
286  * A MIX of performance and space considerations for TID lookups
287  */
288 typedef u_int16_t tid_t;
289 
/*
 * Per-target LUN -> TID map.  `TID' is a variable-length trailing
 * array; `size' holds its allocated element count (the +1 for the
 * declared element is folded in by the allocator in ASR_getTidAddress).
 */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];	/* grows with the allocation */
} lun2tid_t;
294 
/*
 * Per-bus target -> LUN-map table.  `LUN' is a variable-length
 * trailing array of per-target lun2tid_t pointers; `size' is its
 * allocated element count.
 */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* grows with the allocation */
} target2lun_t;
299 
300 /*
301  *	To ensure that we only allocate and use the worst case ccb here, lets
302  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
303  *	ccb type, ensure that you add the additional structures into our local
304  *	ccb union. To ensure strict type checking, we will utilize the local
305  *	ccb definition wherever possible.
306  */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;   /* SCSI I/O requests */
	struct ccb_setasync csa;    /* CAM set-async-callback requests */
};
312 
/*
 * DMA-visible scratch area the adapter writes polled replies into:
 * `status' receives the ExecStatusGet reply (see ASR_getStatus) and
 * `rstatus' the IopReset status word (see ASR_resetIOP).
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;		/* StatusGet reply buffer */
	U32				rstatus;	/* resetIOP reply status word */
};
317 
318 /**************************************************************************
319 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
320 **  Is Configured Into The System.  The Structure Supplies Configuration **
321 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
322 ***************************************************************************/
323 
typedef struct Asr_softc {
	device_t		ha_dev;	       /* newbus device handle */
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* frame-window offset of the blink-LED bytes (see ASR_getBlinkLedCode) */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window: FIFOs, status, mask */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message-frame window (asr_set_frame) */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	/* DMA resources for the polled status/reset reply area */
	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;
	u_int32_t		ha_rstatus_phys; /* bus address of ha_statusmem->rstatus */
	u_int32_t		ha_status_phys;  /* bus address of ha_statusmem->status */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthand accessors into an LCT entry's IdentityTag bytes */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1]; /* per-bus TID lookup tables */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter state, one of: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
	struct cdev *ha_devt;	/* control device node (asr_ops) */
} Asr_softc_t;
383 
384 static Asr_softc_t *Asr_softc_list;
385 
386 /*
387  *	Prototypes of the routines we have in this object.
388  */
389 
390 /* I2O HDM interface */
391 static int	asr_probe(device_t dev);
392 static int	asr_attach(device_t dev);
393 
394 static d_ioctl_t asr_ioctl;
395 static d_open_t asr_open;
396 static d_close_t asr_close;
397 static int	asr_intr(Asr_softc_t *sc);
398 static void	asr_timeout(void *arg);
399 static int	ASR_init(Asr_softc_t *sc);
400 static int	ASR_acquireLct(Asr_softc_t *sc);
401 static int	ASR_acquireHrt(Asr_softc_t *sc);
402 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
403 static void	asr_poll(struct cam_sim *sim);
404 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
405 
406 /*
407  *	Here is the auto-probe structure used to nest our tests appropriately
408  *	during the startup phase of the operating system.
409  */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	/* no detach: driver stays resident once attached */
	{ 0, 0 }	/* terminator */
};
415 
/* Driver description handed to DRIVER_MODULE(); newbus allocates
 * one Asr_softc_t per matched device. */
static driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};
421 
422 static devclass_t asr_devclass;
423 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
424 MODULE_VERSION(asr, 1);
425 MODULE_DEPEND(asr, pci, 1, 1, 1);
426 MODULE_DEPEND(asr, cam, 1, 1, 1);
427 
428 /*
429  * devsw for asr hba driver
430  *
431  * only ioctl is used. the sd driver provides all other access.
432  */
static struct dev_ops asr_ops = {
	{ "asr", 0, 0 },
	.d_open =	asr_open,	/* management/ioctl path only; */
	.d_close =	asr_close,	/* normal I/O goes through CAM */
	.d_ioctl =	asr_ioctl,
};
439 
440 /* I2O support routines */
441 
442 static __inline u_int32_t
443 asr_get_FromFIFO(Asr_softc_t *sc)
444 {
445 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
446 				 I2O_REG_FROMFIFO));
447 }
448 
449 static __inline u_int32_t
450 asr_get_ToFIFO(Asr_softc_t *sc)
451 {
452 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
453 				 I2O_REG_TOFIFO));
454 }
455 
456 static __inline u_int32_t
457 asr_get_intr(Asr_softc_t *sc)
458 {
459 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
460 				 I2O_REG_MASK));
461 }
462 
463 static __inline u_int32_t
464 asr_get_status(Asr_softc_t *sc)
465 {
466 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
467 				 I2O_REG_STATUS));
468 }
469 
470 static __inline void
471 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
472 {
473 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
474 			  val);
475 }
476 
477 static __inline void
478 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
479 {
480 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
481 			  val);
482 }
483 
484 static __inline void
485 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
486 {
487 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
488 			  val);
489 }
490 
491 static __inline void
492 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
493 {
494 	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
495 				 offset, (u_int32_t *)frame, len);
496 }
497 
498 /*
499  *	Fill message with default.
500  */
501 static PI2O_MESSAGE_FRAME
502 ASR_fillMessage(void *Message, u_int16_t size)
503 {
504 	PI2O_MESSAGE_FRAME Message_Ptr;
505 
506 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
507 	bzero(Message_Ptr, size);
508 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
509 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
510 	  (size + sizeof(U32) - 1) >> 2);
511 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
512 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
513 	return (Message_Ptr);
514 } /* ASR_fillMessage */
515 
516 #define	EMPTY_QUEUE (0xffffffff)
517 
518 static __inline U32
519 ASR_getMessage(Asr_softc_t *sc)
520 {
521 	U32	MessageOffset;
522 
523 	MessageOffset = asr_get_ToFIFO(sc);
524 	if (MessageOffset == EMPTY_QUEUE)
525 		MessageOffset = asr_get_ToFIFO(sc);
526 
527 	return (MessageOffset);
528 } /* ASR_getMessage */
529 
/*
 * Issue a polled (synchronous) command: obtain a free inbound frame,
 * copy the message into it, disable interrupts, and post the frame.
 * Returns the saved interrupt mask (callers must restore it once the
 * reply arrives), or 0xffffffff if no frame became free in ~15s.
 */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;	/* sentinel: "no frame obtained" */
	U32	MessageOffset;
	u_int	Delay = 1500;		/* 1500 * 10ms = 15s worst case */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the message body into the adapter-resident frame */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts, saving the old mask as
		 *	the return value so the caller can restore it.
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	return (Mask);
} /* ASR_initiateCp */
559 
/*
 *	Reset the adapter (I2O ExecIopReset).  The adapter DMAs its
 *	status word into ha_statusmem->rstatus, which is polled for up
 *	to ~2s.  Returns the reply status word, or 0 if the message
 *	could not be posted at all.
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status.
	 *  NOTE(review): Reply_Ptr is not volatile-qualified; the poll
	 *  below presumably works because DELAY() is a call the compiler
	 *  cannot see through — confirm before changing the loop.
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 * 10ms = 2s */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts (mask saved by
		 *	ASR_initiateCp).
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */
608 
/*
 *	Get the current state of the adapter (I2O ExecStatusGet).
 *	The adapter DMAs the reply into ha_statusmem->status; we poll
 *	the reply's SyncByte for up to ~255ms (the V1.08 changelog
 *	raised this from 50ms for 2000S/2005S boards).
 *	Returns a pointer to the reply buffer, or NULL on timeout or
 *	if the message could not be posted.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 */
		u_int8_t Delay = 255;	/* 255 * 1ms */

		/* SyncByte is read through a volatile pointer to defeat
		 * compiler caching of the DMA-written byte. */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
663 
664 /*
665  *	Check if the device is a SCSI I2O HBA, and add it to the list.
666  */
667 
668 /*
669  * Probe for ASR controller.  If we find it, we will use it.
670  * virtual adapters.
671  */
672 static int
673 asr_probe(device_t dev)
674 {
675 	u_int32_t id;
676 
677 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
678 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
679 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
680 		return (BUS_PROBE_DEFAULT);
681 	}
682 	return (ENXIO);
683 } /* asr_probe */
684 
685 static __inline union asr_ccb *
686 asr_alloc_ccb(Asr_softc_t *sc)
687 {
688 	union asr_ccb *new_ccb;
689 
690 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
691 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
692 		new_ccb->ccb_h.pinfo.priority = 1;
693 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
694 		new_ccb->ccb_h.spriv_ptr0 = sc;
695 	}
696 	return (new_ccb);
697 } /* asr_alloc_ccb */
698 
699 static __inline void
700 asr_free_ccb(union asr_ccb *free_ccb)
701 {
702 	kfree(free_ccb, M_DEVBUF);
703 } /* asr_free_ccb */
704 
705 /*
706  *	Print inquiry data `carefully'
707  */
708 static void
709 ASR_prstring(u_int8_t *s, int len)
710 {
711 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
712 		kprintf ("%c", *(s++));
713 	}
714 } /* ASR_prstring */
715 
/*
 *	Send a message synchronously and without Interrupt to a ccb:
 *	interrupts are masked and completions are reaped by polling
 *	asr_intr() until the CCB leaves CAM_REQ_INPROG.  Returns the
 *	final ccb_h.status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.  The CCB pointer itself is the
	 * completion context.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	/* If no inbound frame is free, ask CAM to requeue the request */
	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 * (If the queue failed above, status is already != INPROG and
	 * the loop does not run.)
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
754 
755 /*
756  *	Send a message synchronously to an Asr_softc_t.
757  */
758 static int
759 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
760 {
761 	union asr_ccb	*ccb;
762 	int		status;
763 
764 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
765 		return (CAM_REQUEUE_REQ);
766 	}
767 
768 	status = ASR_queue_s (ccb, Message);
769 
770 	asr_free_ccb(ccb);
771 
772 	return (status);
773 } /* ASR_queue_c */
774 
/*
 *	Add the specified ccb to the active queue and arm its timeout
 *	callout.  Runs in a critical section — the V1.04 changelog notes
 *	this locking is required to avoid panics.
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flushes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		/* timeout is in ms; callout_reset wants ticks */
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
797 
/*
 *	Remove the specified ccb from the active queue, cancelling its
 *	pending asr_timeout callout first.  Critical section pairs with
 *	ASR_ccbAdd.
 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
809 
/*
 *	Fail all the active commands, so they get re-issued by the
 *	operating system.  Each CCB is marked CAM_REQUEUE_REQ with zero
 *	bytes transferred, then completed back to CAM (or woken, for
 *	internal pathless CCBs parked in ASR_queue_s).
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;

	crit_enter();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	/* Always take the list head: ASR_ccbRemove unlinks as we go
	 * (the V1.05 changelog notes iterating any other way misses
	 * entries once removal mutates the list). */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* internal CCB: unblock its ASR_queue_s waiter */
			wakeup (ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
847 
/*
 *	The following command causes the HBA to reset the specific bus:
 *	scan the LCT for the bus-port entry matching `bus' and fire an
 *	asynchronous I2O_HBA_BUS_RESET at its TID.  Silently does
 *	nothing if no matching port entry exists.
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Iterate the LCT entries; table size is in 32-bit words. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
876 
877 static __inline int
878 ASR_getBlinkLedCode(Asr_softc_t *sc)
879 {
880 	U8	blink;
881 
882 	if (sc == NULL)
883 		return (0);
884 
885 	blink = bus_space_read_1(sc->ha_frame_btag,
886 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
887 	if (blink != 0xBC)
888 		return (0);
889 
890 	blink = bus_space_read_1(sc->ha_frame_btag,
891 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
892 	return (blink);
893 } /* ASR_getBlinkCode */
894 
895 /*
896  *	Determine the address of an TID lookup. Must be done at high priority
897  *	since the address can be changed by other threads of execution.
898  *
899  *	Returns NULL pointer if not indexible (but will attempt to generate
900  *	an index if `new_entry' flag is set to TRUE).
901  *
902  *	All addressible entries are to be guaranteed zero if never initialized.
903  */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;	/* per-bus table: target -> lun map */
	lun2tid_t	*target_ptr;	/* per-target table: lun -> TID */
	unsigned	new_size;	/* chunk-aligned capacity request */

	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	/* Round target up to the next BUS_CHUNK boundary. */
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 *
		 * NOTE(review): kmalloc with M_WAITOK cannot return NULL on
		 * this platform, so the == NULL arm is defensive dead code.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		/* size records usable slots 0..new_size inclusive. */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	/* lun == 0 gets the minimal single-entry table; otherwise align. */
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1033 
1034 /*
1035  *	Get a pre-existing TID relationship.
1036  *
1037  *	If the TID was never set, return (tid_t)-1.
1038  *
1039  *	should use mutex rather than spl.
1040  */
1041 static __inline tid_t
1042 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1043 {
1044 	tid_t	*tid_ptr;
1045 	tid_t	retval;
1046 
1047 	crit_enter();
1048 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1049 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1050 	 || (*tid_ptr == (tid_t)0)) {
1051 		crit_exit();
1052 		return ((tid_t)-1);
1053 	}
1054 	retval = *tid_ptr;
1055 	crit_exit();
1056 	return (retval);
1057 } /* ASR_getTid */
1058 
1059 /*
1060  *	Set a TID relationship.
1061  *
1062  *	If the TID was not set, return (tid_t)-1.
1063  *
1064  *	should use mutex rather than spl.
1065  */
1066 static __inline tid_t
1067 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1068 {
1069 	tid_t	*tid_ptr;
1070 
1071 	if (TID != (tid_t)-1) {
1072 		if (TID == 0) {
1073 			return ((tid_t)-1);
1074 		}
1075 		crit_enter();
1076 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1077 		 == NULL) {
1078 			crit_exit();
1079 			return ((tid_t)-1);
1080 		}
1081 		*tid_ptr = TID;
1082 		crit_exit();
1083 	}
1084 	return (TID);
1085 } /* ASR_setTid */
1086 
1087 /*-------------------------------------------------------------------------*/
1088 /*		      Function ASR_rescan				   */
1089 /*-------------------------------------------------------------------------*/
1090 /* The Parameters Passed To This Function Are :				   */
1091 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1092 /*									   */
1093 /* This Function Will rescan the adapter and resynchronize any data	   */
1094 /*									   */
1095 /* Return : 0 For OK, Error Code Otherwise				   */
1096 /*-------------------------------------------------------------------------*/
1097 
1098 static int
1099 ASR_rescan(Asr_softc_t *sc)
1100 {
1101 	int bus;
1102 	int error;
1103 
1104 	/*
1105 	 * Re-acquire the LCT table and synchronize us to the adapter.
1106 	 */
1107 	if ((error = ASR_acquireLct(sc)) == 0) {
1108 		error = ASR_acquireHrt(sc);
1109 	}
1110 
1111 	if (error != 0) {
1112 		return error;
1113 	}
1114 
1115 	bus = sc->ha_MaxBus;
1116 	/* Reset all existing cached TID lookups */
1117 	do {
1118 		int target, event = 0;
1119 
1120 		/*
1121 		 *	Scan for all targets on this bus to see if they
1122 		 * got affected by the rescan.
1123 		 */
1124 		for (target = 0; target <= sc->ha_MaxId; ++target) {
1125 			int lun;
1126 
1127 			/* Stay away from the controller ID */
1128 			if (target == sc->ha_adapter_target[bus]) {
1129 				continue;
1130 			}
1131 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1132 				PI2O_LCT_ENTRY Device;
1133 				tid_t	       TID = (tid_t)-1;
1134 				tid_t	       LastTID;
1135 
1136 				/*
1137 				 * See if the cached TID changed. Search for
1138 				 * the device in our new LCT.
1139 				 */
1140 				for (Device = sc->ha_LCT->LCTEntry;
1141 				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1142 				   + I2O_LCT_getTableSize(sc->ha_LCT));
1143 				  ++Device) {
1144 					if ((Device->le_type != I2O_UNKNOWN)
1145 					 && (Device->le_bus == bus)
1146 					 && (Device->le_target == target)
1147 					 && (Device->le_lun == lun)
1148 					 && (I2O_LCT_ENTRY_getUserTID(Device)
1149 					  == 0xFFF)) {
1150 						TID = I2O_LCT_ENTRY_getLocalTID(
1151 						  Device);
1152 						break;
1153 					}
1154 				}
1155 				/*
1156 				 * Indicate to the OS that the label needs
1157 				 * to be recalculated, or that the specific
1158 				 * open device is no longer valid (Merde)
1159 				 * because the cached TID changed.
1160 				 */
1161 				LastTID = ASR_getTid (sc, bus, target, lun);
1162 				if (LastTID != TID) {
1163 					struct cam_path * path;
1164 
1165 					if (xpt_create_path(&path,
1166 					  /*periph*/NULL,
1167 					  cam_sim_path(sc->ha_sim[bus]),
1168 					  target, lun) != CAM_REQ_CMP) {
1169 						if (TID == (tid_t)-1) {
1170 							event |= AC_LOST_DEVICE;
1171 						} else {
1172 							event |= AC_INQ_CHANGED
1173 							       | AC_GETDEV_CHANGED;
1174 						}
1175 					} else {
1176 						if (TID == (tid_t)-1) {
1177 							xpt_async(
1178 							  AC_LOST_DEVICE,
1179 							  path, NULL);
1180 						} else if (LastTID == (tid_t)-1) {
1181 							struct ccb_getdev ccb;
1182 
1183 							xpt_setup_ccb(
1184 							  &(ccb.ccb_h),
1185 							  path, /*priority*/5);
1186 							xpt_async(
1187 							  AC_FOUND_DEVICE,
1188 							  path,
1189 							  &ccb);
1190 						} else {
1191 							xpt_async(
1192 							  AC_INQ_CHANGED,
1193 							  path, NULL);
1194 							xpt_async(
1195 							  AC_GETDEV_CHANGED,
1196 							  path, NULL);
1197 						}
1198 					}
1199 				}
1200 				/*
1201 				 *	We have the option of clearing the
1202 				 * cached TID for it to be rescanned, or to
1203 				 * set it now even if the device never got
1204 				 * accessed. We chose the later since we
1205 				 * currently do not use the condition that
1206 				 * the TID ever got cached.
1207 				 */
1208 				ASR_setTid (sc, bus, target, lun, TID);
1209 			}
1210 		}
1211 		/*
1212 		 *	The xpt layer can not handle multiple events at the
1213 		 * same call.
1214 		 */
1215 		if (event & AC_LOST_DEVICE) {
1216 			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1217 		}
1218 		if (event & AC_INQ_CHANGED) {
1219 			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1220 		}
1221 		if (event & AC_GETDEV_CHANGED) {
1222 			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1223 		}
1224 	} while (--bus >= 0);
1225 	return (error);
1226 } /* ASR_rescan */
1227 
1228 /*-------------------------------------------------------------------------*/
1229 /*		      Function ASR_reset				   */
1230 /*-------------------------------------------------------------------------*/
1231 /* The Parameters Passed To This Function Are :				   */
1232 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1233 /*									   */
1234 /* This Function Will reset the adapter and resynchronize any data	   */
1235 /*									   */
1236 /* Return : None							   */
1237 /*-------------------------------------------------------------------------*/
1238 
1239 static int
1240 ASR_reset(Asr_softc_t *sc)
1241 {
1242 	int retVal;
1243 
1244 	crit_enter();
1245 	if ((sc->ha_in_reset == HA_IN_RESET)
1246 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1247 		crit_exit();
1248 		return (EBUSY);
1249 	}
1250 	/*
1251 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1252 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1253 	 */
1254 	++(sc->ha_in_reset);
1255 	if (ASR_resetIOP(sc) == 0) {
1256 		debug_asr_printf ("ASR_resetIOP failed\n");
1257 		/*
1258 		 *	We really need to take this card off-line, easier said
1259 		 * than make sense. Better to keep retrying for now since if a
1260 		 * UART cable is connected the blinkLEDs the adapter is now in
1261 		 * a hard state requiring action from the monitor commands to
1262 		 * the HBA to continue. For debugging waiting forever is a
1263 		 * good thing. In a production system, however, one may wish
1264 		 * to instead take the card off-line ...
1265 		 */
1266 		/* Wait Forever */
1267 		while (ASR_resetIOP(sc) == 0);
1268 	}
1269 	retVal = ASR_init (sc);
1270 	crit_exit();
1271 	if (retVal != 0) {
1272 		debug_asr_printf ("ASR_init failed\n");
1273 		sc->ha_in_reset = HA_OFF_LINE;
1274 		return (ENXIO);
1275 	}
1276 	if (ASR_rescan (sc) != 0) {
1277 		debug_asr_printf ("ASR_rescan failed\n");
1278 	}
1279 	ASR_failActiveCommands (sc);
1280 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1281 		kprintf ("asr%d: Brining adapter back on-line\n",
1282 		  sc->ha_path[0]
1283 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1284 		    : 0);
1285 	}
1286 	sc->ha_in_reset = HA_OPERATIONAL;
1287 	return (0);
1288 } /* ASR_reset */
1289 
1290 /*
1291  *	Device timeout handler.
1292  */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;	/* blink-LED code, 0 when adapter healthy */

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on the same ccb: escalate to full reset. */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; re-arm and retry the reset later. */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	/* Mark the ccb so a repeat timeout is recognized above. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1343 
1344 /*
1345  * send a message asynchronously
1346  */
1347 static int
1348 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1349 {
1350 	U32		MessageOffset;
1351 	union asr_ccb	*ccb;
1352 
1353 	debug_asr_printf("Host Command Dump:\n");
1354 	debug_asr_dump_message(Message);
1355 
1356 	ccb = (union asr_ccb *)(long)
1357 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1358 
1359 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1360 		asr_set_frame(sc, Message, MessageOffset,
1361 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1362 		if (ccb) {
1363 			ASR_ccbAdd (sc, ccb);
1364 		}
1365 		/* Post the command */
1366 		asr_set_ToFIFO(sc, MessageOffset);
1367 	} else {
1368 		if (ASR_getBlinkLedCode(sc)) {
1369 			/*
1370 			 *	Unlikely we can do anything if we can't grab a
1371 			 * message frame :-(, but lets give it a try.
1372 			 */
1373 			(void)ASR_reset(sc);
1374 		}
1375 	}
1376 	return (MessageOffset);
1377 } /* ASR_queue */
1378 
1379 
/*
 * Simple Scatter Gather elements.
 *
 * Fill in simple SG element `Index' of list `SGL' to describe `Buffer'
 * (`Size' bytes) with the supplied I2O `Flags'. A NULL Buffer yields a
 * zero physical address. Arguments are now fully parenthesized in the
 * expansion to avoid operator-precedence surprises (the macro still
 * evaluates `Buffer' twice; do not pass expressions with side effects).
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[(Index)].FlagsCount), \
	  (Size));						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[(Index)].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[(Index)]),	   \
	  ((Buffer) == NULL) ? 0 : KVTOPHYS(Buffer))
1391 
1392 /*
1393  *	Retrieve Parameter Group.
1394  */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	/* On-stack UtilParamsGet frame with room for two simple SG elements
	 * plus the operations list appended after the frame proper. */
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	/* Layout of the caller's Buffer once the adapter fills it in. */
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* The operations list sits immediately after the SG elements. */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* Single FIELD_GET operation requesting all fields of `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	/* VersionOffset encodes the SGL offset (in U32s) in the high nibble. */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Success requires both a completed queue op and a non-empty result. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1458 
1459 /*
1460  *	Acquire the LCT information.
1461  */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;	/* walking pointer into LCT */
	int				len;	/* bytes of LCT remaining */
	I2O_LCT				Table;	/* probe target for sizing */
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	/* NOTE(review): M_WAITOK kmalloc cannot return NULL; check is
	 * defensive dead code. */
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 *
	 *	The buffer may span non-contiguous physical pages, so walk
	 * it page by page, emitting one simple SG element per physically
	 * contiguous run and growing the message frame as elements are
	 * appended.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark the end of the list. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame and reposition sg into the copy. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				kfree(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				kfree(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			kfree(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 *	Classify each LCT entry and fill in its bus/target/lun from
	 * the adapter's parameter groups. Entries that remain I2O_UNKNOWN
	 * (or whose parameter fetch fails) are skipped by `continue'.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			/* Port classes (and unknowns) stop here; only the
			 * peripheral classes above `break' into the device
			 * info lookup below. */
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen on the adapter. */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1705 
1706 /*
1707  * Initialize a message frame.
1708  * We assume that the CDB has already been set up, so all we do here is
1709  * generate the Scatter Gather list.
1710  */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;	/* walking pointer into the data buffer */
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/*
	 * Resolve the I2O TID for this (bus,target,lun): try the cache
	 * first, falling back to a linear search of the LCT (caching the
	 * result for next time).
	 */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No matching device: caller gets NULL and fails the ccb. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	/* VersionOffset high nibble encodes the SGL offset in U32 units. */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* rw != 0 means host-to-device (write). */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 * One simple element per physically contiguous run, bounded by
	 * SG_SIZE elements (the last slot is reserved for sense data).
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1860 
1861 /*
1862  *	Reset the adapter.
1863  */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	/* Message frame followed by a U32 reply-status word. */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;	/* saved interrupt mask */

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 *
		 * NOTE(review): unbounded busy-wait; spins forever if the
		 * adapter never posts a status.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			/* Post each reply frame's physical address. */
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	/* ASR_initiateCp failed to post the message. */
	return (0);
} /* ASR_initOutBound */
1940 
1941 /*
1942  *	Set the system table
1943  */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	/*
	 * Build and send an I2O_EXEC_SYS_TAB_SET message describing every
	 * attached asr adapter to the IOP.  Returns ENOMEM on allocation
	 * failure, otherwise the status from ASR_queue_c().
	 */
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	/* One system-table entry per adapter on the global softc list. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/*
	 * SG list needs NumberEntries + 3 simple elements: one for the
	 * header, one per adapter, and two zero-length terminators below.
	 */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	/* First SG element: the system table header. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One element per adapter's IOP entry; mark the last end-of-buffer. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length elements terminate the scatter/gather list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	kfree(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2004 
/*
 * Read the Hardware Resource Table (HRT) from the IOP and use the
 * adapter IDs in it to fill in the bus number (le_bus) of each matching
 * LCT entry, tracking the highest bus number seen in sc->ha_MaxBus.
 * Returns 0 on success, ENODEV if the HRT_GET message fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt;
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries our local buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match each LCT device to this HRT entry by TID. */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number lives in the AdapterID high word. */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2062 
2063 /*
2064  *	Enable the adapter.
2065  */
2066 static int
2067 ASR_enableSys(Asr_softc_t *sc)
2068 {
2069 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2070 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2071 
2072 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2073 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2074 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2075 	  I2O_EXEC_SYS_ENABLE);
2076 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2077 } /* ASR_enableSys */
2078 
2079 /*
2080  *	Perform the stages necessary to initialize the adapter
2081  */
2082 static int
2083 ASR_init(Asr_softc_t *sc)
2084 {
2085 	return ((ASR_initOutBound(sc) == 0)
2086 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2087 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2088 } /* ASR_init */
2089 
2090 /*
2091  *	Send a Synchronize Cache command to the target device.
2092  */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	/*
	 * Issue a SYNCHRONIZE CACHE (cache flush) to one bus/target/lun
	 * via a private SCSI SCB execute message.  Silently does nothing
	 * when sc is NULL or the device has no valid, non-zero TID.
	 */
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 * NOTE(review): the guard below proceeds when the ha_ccb list is
	 * NON-empty, which reads contrary to the comment above -- verify
	 * the ha_ccb list semantics against the CCB management code.
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		/* Build the private SCB-execute frame on the stack. */
		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte CDB: SYNCHRONIZE CACHE, LUN encoded in byte 1. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/* Re-set the SCB flags, this time including XFER_FROM_DEVICE. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Completion status is intentionally ignored (best effort). */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2157 
2158 static void
2159 ASR_synchronize(Asr_softc_t *sc)
2160 {
2161 	int bus, target, lun;
2162 
2163 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2164 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2165 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2166 				ASR_sync(sc,bus,target,lun);
2167 			}
2168 		}
2169 	}
2170 }
2171 
2172 /*
2173  *	Reset the HBA, targets and BUS.
2174  *		Currently this resets *all* the SCSI busses.
2175  */
2176 static __inline void
2177 asr_hbareset(Asr_softc_t *sc)
2178 {
2179 	ASR_synchronize(sc);
2180 	(void)ASR_reset(sc);
2181 } /* asr_hbareset */
2182 
2183 /*
2184  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2185  * limit and a reduction in error checking (in the pre 4.0 case).
2186  */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	/*
	 * Locate, size and map the adapter's register window(s).
	 * Returns 1 on success, 0 on failure.
	 */
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 * (BARs with bit 0 set are I/O-space mappings and are skipped).
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 *	Give up?  Fall back to BAR 0 regardless.
	 */
	if (rid >= 4) {
		rid = 0;
	}
	rid = PCIR_BAR(rid);
	/*
	 * Standard PCI BAR sizing: save the BAR, write all ones, read
	 * back the size mask, restore the original value.  `l' ends up
	 * as the decoded window size.
	 */
	p = pci_read_config(dev, rid, sizeof(p));
	pci_write_config(dev, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		/* Not the split-BAR part; check for the conjoined-BAR
		 * "Dominator" subsystem-ID range instead. */
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	/* Strip the BAR flag bits to get the bus base address. */
	p &= ~15;
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* Map the next BAR (the messaging registers) separately. */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Single window: frame registers share the I2O mapping. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2268 
2269 /*
2270  *	A simplified copy of the real pci_map_int with additional
2271  * registration requirements.
2272  */
static int
asr_pci_map_int(device_t dev, Asr_softc_t *sc)
{
	/*
	 * Allocate the adapter's (shareable) interrupt and hook up
	 * asr_intr as its handler.  Returns 1 on success, 0 on failure.
	 * NOTE(review): if bus_setup_intr() fails, ha_irq_res is not
	 * released here and the visible caller does not release it either
	 * -- looks like a resource leak on that error path; confirm
	 * against the detach code.
	 */
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	  RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == NULL) {
		return (0);
	}
	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
		return (0);
	}
	/* Record the PCI interrupt line for later reporting. */
	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
	return (1);
} /* asr_pci_map_int */
2290 
2291 static void
2292 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2293 {
2294 	Asr_softc_t *sc;
2295 
2296 	if (error)
2297 		return;
2298 
2299 	sc = (Asr_softc_t *)arg;
2300 
2301 	/* XXX
2302 	 * The status word can be at a 64-bit address, but the existing
2303 	 * accessor macros simply cannot manipulate 64-bit addresses.
2304 	 */
2305 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2306 	    offsetof(struct Asr_status_mem, status);
2307 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2308 	    offsetof(struct Asr_status_mem, rstatus);
2309 }
2310 
static int
asr_alloc_dma(Asr_softc_t *sc)
{
	/*
	 * Create the busdma tags and shared status memory used to talk to
	 * the IOP: a 32-bit parent tag, a single-segment tag sized for
	 * struct Asr_status_mem, then allocate and load that memory (the
	 * asr_status_cb callback records the physical addresses).
	 * Returns 0 on success or ENOMEM, unwinding anything created.
	 */
	device_t dev;

	dev = sc->ha_dev;

	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sizeof(sc->ha_statusmem),/* maxsize */
			       1,			/* nsegments */
			       sizeof(sc->ha_statusmem),/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_statusmem_dmat)) {
		device_printf(dev, "Cannot allocate status DMA tag\n");
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
		device_printf(dev, "Cannot allocate status memory\n");
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}
	/* Single segment, no deferral flags: callback runs synchronously. */
	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);

	return (0);
}
2359 
static void
asr_release_dma(Asr_softc_t *sc)
{
	/*
	 * Tear down the DMA state created by asr_alloc_dma() in reverse
	 * order.  Safe on a partially-initialized softc: each step is
	 * guarded by the field the corresponding allocation step set
	 * (ha_rstatus_phys != 0 implies the status map was loaded).
	 */

	if (sc->ha_rstatus_phys != 0)
		bus_dmamap_unload(sc->ha_statusmem_dmat,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem != NULL)
		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
	if (sc->ha_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_parent_dmat);
}
2375 
2376 /*
2377  *	Attach the devices, and virtual devices to the driver list.
2378  */
2379 static int
2380 asr_attach(device_t dev)
2381 {
2382 	PI2O_EXEC_STATUS_GET_REPLY status;
2383 	PI2O_LCT_ENTRY		 Device;
2384 	Asr_softc_t		 *sc, **ha;
2385 	struct scsi_inquiry_data *iq;
2386 	int			 bus, size, unit;
2387 	int			 error;
2388 
2389 	sc = device_get_softc(dev);
2390 	unit = device_get_unit(dev);
2391 	sc->ha_dev = dev;
2392 
2393 	if (Asr_softc_list == NULL) {
2394 		/*
2395 		 *	Fixup the OS revision as saved in the dptsig for the
2396 		 *	engine (dptioctl.h) to pick up.
2397 		 */
2398 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2399 	}
2400 	/*
2401 	 *	Initialize the software structure
2402 	 */
2403 	LIST_INIT(&(sc->ha_ccb));
2404 	/* Link us into the HA list */
2405 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next));
2406 		*(ha) = sc;
2407 
2408 	/*
2409 	 *	This is the real McCoy!
2410 	 */
2411 	if (!asr_pci_map_mem(dev, sc)) {
2412 		device_printf(dev, "could not map memory\n");
2413 		return(ENXIO);
2414 	}
2415 	/* Enable if not formerly enabled */
2416 	pci_write_config(dev, PCIR_COMMAND,
2417 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2418 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2419 
2420 	sc->ha_pciBusNum = pci_get_bus(dev);
2421 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2422 
2423 	if ((error = asr_alloc_dma(sc)) != 0)
2424 		return (error);
2425 
2426 	/* Check if the device is there? */
2427 	if (ASR_resetIOP(sc) == 0) {
2428 		device_printf(dev, "Cannot reset adapter\n");
2429 		asr_release_dma(sc);
2430 		return (EIO);
2431 	}
2432 	status = &sc->ha_statusmem->status;
2433 	if (ASR_getStatus(sc) == NULL) {
2434 		device_printf(dev, "could not initialize hardware\n");
2435 		asr_release_dma(sc);
2436 		return(ENODEV);
2437 	}
2438 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2439 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2440 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2441 	sc->ha_SystemTable.IopState = status->IopState;
2442 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2443 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2444 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2445 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2446 
2447 	if (!asr_pci_map_int(dev, (void *)sc)) {
2448 		device_printf(dev, "could not map interrupt\n");
2449 		asr_release_dma(sc);
2450 		return(ENXIO);
2451 	}
2452 
2453 	/* Adjust the maximim inbound count */
2454 	if (((sc->ha_QueueSize =
2455 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2456 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2457 		sc->ha_QueueSize = MAX_INBOUND;
2458 	}
2459 
2460 	/* Adjust the maximum outbound count */
2461 	if (((sc->ha_Msgs_Count =
2462 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2463 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2464 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2465 	}
2466 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2467 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2468 	}
2469 
2470 	/* Adjust the maximum SG size to adapter */
2471 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2472 	    2)) > MAX_INBOUND_SIZE) {
2473 		size = MAX_INBOUND_SIZE;
2474 	}
2475 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2476 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2477 
2478 	/*
2479 	 *	Only do a bus/HBA reset on the first time through. On this
2480 	 * first time through, we do not send a flush to the devices.
2481 	 */
2482 	if (ASR_init(sc) == 0) {
2483 		struct BufferInfo {
2484 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2485 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2486 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2487 		} Buffer;
2488 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2489 #define FW_DEBUG_BLED_OFFSET 8
2490 
2491 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2492 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2493 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2494 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2495 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2496 		}
2497 		if (ASR_acquireLct(sc) == 0) {
2498 			(void)ASR_acquireHrt(sc);
2499 		}
2500 	} else {
2501 		device_printf(dev, "failed to initialize\n");
2502 		asr_release_dma(sc);
2503 		return(ENXIO);
2504 	}
2505 	/*
2506 	 *	Add in additional probe responses for more channels. We
2507 	 * are reusing the variable `target' for a channel loop counter.
2508 	 * Done here because of we need both the acquireLct and
2509 	 * acquireHrt data.
2510 	 */
2511 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2512 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2513 		if (Device->le_type == I2O_UNKNOWN) {
2514 			continue;
2515 		}
2516 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2517 			if (Device->le_target > sc->ha_MaxId) {
2518 				sc->ha_MaxId = Device->le_target;
2519 			}
2520 			if (Device->le_lun > sc->ha_MaxLun) {
2521 				sc->ha_MaxLun = Device->le_lun;
2522 			}
2523 		}
2524 		if (((Device->le_type & I2O_PORT) != 0)
2525 		 && (Device->le_bus <= MAX_CHANNEL)) {
2526 			/* Do not increase MaxId for efficiency */
2527 			sc->ha_adapter_target[Device->le_bus] =
2528 			    Device->le_target;
2529 		}
2530 	}
2531 
2532 	/*
2533 	 *	Print the HBA model number as inquired from the card.
2534 	 */
2535 
2536 	device_printf(dev, " ");
2537 
2538 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2539 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2540 	    NULL) {
2541 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2542 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2543 		int					posted = 0;
2544 
2545 		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2546 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2547 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2548 
2549 		I2O_MESSAGE_FRAME_setVersionOffset(
2550 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2551 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2552 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2553 		I2O_MESSAGE_FRAME_setMessageSize(
2554 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2555 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2556 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2557 		    sizeof(U32));
2558 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2559 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2560 		I2O_MESSAGE_FRAME_setFunction(
2561 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2562 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2563 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2564 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2565 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2566 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2567 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2568 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2569 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2570 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2571 		    DPT_ORGANIZATION_ID);
2572 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2573 		Message_Ptr->CDB[0] = INQUIRY;
2574 		Message_Ptr->CDB[4] =
2575 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2576 		if (Message_Ptr->CDB[4] == 0) {
2577 			Message_Ptr->CDB[4] = 255;
2578 		}
2579 
2580 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2581 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2582 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2583 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2584 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2585 
2586 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2587 		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2588 		  sizeof(struct scsi_inquiry_data));
2589 		SG(&(Message_Ptr->SGL), 0,
2590 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2591 		  iq, sizeof(struct scsi_inquiry_data));
2592 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2593 
2594 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2595 			kprintf (" ");
2596 			ASR_prstring (iq->vendor, 8);
2597 			++posted;
2598 		}
2599 		if (iq->product[0] && (iq->product[0] != ' ')) {
2600 			kprintf (" ");
2601 			ASR_prstring (iq->product, 16);
2602 			++posted;
2603 		}
2604 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2605 			kprintf (" FW Rev. ");
2606 			ASR_prstring (iq->revision, 4);
2607 			++posted;
2608 		}
2609 		kfree(iq, M_TEMP);
2610 		if (posted) {
2611 			kprintf (",");
2612 		}
2613 	}
2614 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2615 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2616 
2617 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2618 		struct cam_devq	  * devq;
2619 		int		    QueueSize = sc->ha_QueueSize;
2620 
2621 		if (QueueSize > MAX_INBOUND) {
2622 			QueueSize = MAX_INBOUND;
2623 		}
2624 
2625 		/*
2626 		 *	Create the device queue for our SIM(s).
2627 		 */
2628 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2629 			continue;
2630 		}
2631 
2632 		/*
2633 		 *	Construct our first channel SIM entry
2634 		 */
2635 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2636 						unit, &sim_mplock,
2637 						1, QueueSize, devq);
2638 		if (sc->ha_sim[bus] == NULL) {
2639 			continue;
2640 		}
2641 
2642 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2643 			cam_sim_free(sc->ha_sim[bus]);
2644 			sc->ha_sim[bus] = NULL;
2645 			continue;
2646 		}
2647 
2648 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2649 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2650 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2651 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2652 			cam_sim_free(sc->ha_sim[bus]);
2653 			sc->ha_sim[bus] = NULL;
2654 			continue;
2655 		}
2656 	}
2657 
2658 	/*
2659 	 *	Generate the device node information
2660 	 */
2661 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2662 			       "asr%d", unit);
2663 	if (sc->ha_devt != NULL)
2664 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2665 	sc->ha_devt->si_drv1 = sc;
2666 	return(0);
2667 } /* asr_attach */
2668 
2669 static void
2670 asr_poll(struct cam_sim *sim)
2671 {
2672 	asr_intr(cam_sim_softc(sim));
2673 } /* asr_poll */
2674 
2675 static void
2676 asr_action(struct cam_sim *sim, union ccb  *ccb)
2677 {
2678 	struct Asr_softc *sc;
2679 
2680 	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2681 			 ccb->ccb_h.func_code);
2682 
2683 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2684 
2685 	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2686 
2687 	switch (ccb->ccb_h.func_code) {
2688 
2689 	/* Common cases first */
2690 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2691 	{
2692 		struct Message {
2693 			char M[MAX_INBOUND_SIZE];
2694 		} Message;
2695 		PI2O_MESSAGE_FRAME   Message_Ptr;
2696 
2697 		/* Reject incoming commands while we are resetting the card */
2698 		if (sc->ha_in_reset != HA_OPERATIONAL) {
2699 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2700 			if (sc->ha_in_reset >= HA_OFF_LINE) {
2701 				/* HBA is now off-line */
2702 				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2703 			} else {
2704 				/* HBA currently resetting, try again later. */
2705 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2706 			}
2707 			debug_asr_cmd_printf (" e\n");
2708 			xpt_done(ccb);
2709 			debug_asr_cmd_printf (" q\n");
2710 			break;
2711 		}
2712 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2713 			kprintf(
2714 			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2715 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2716 			  ccb->csio.cdb_io.cdb_bytes[0],
2717 			  cam_sim_bus(sim),
2718 			  ccb->ccb_h.target_id,
2719 			  ccb->ccb_h.target_lun);
2720 		}
2721 		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2722 				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2723 				     ccb->ccb_h.target_lun);
2724 		debug_asr_dump_ccb(ccb);
2725 
2726 		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2727 		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2728 			debug_asr_cmd2_printf ("TID=%x:\n",
2729 			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2730 			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2731 			debug_asr_cmd2_dump_message(Message_Ptr);
2732 			debug_asr_cmd1_printf (" q");
2733 
2734 			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2735 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2736 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2737 				debug_asr_cmd_printf (" E\n");
2738 				xpt_done(ccb);
2739 			}
2740 			debug_asr_cmd_printf(" Q\n");
2741 			break;
2742 		}
2743 		/*
2744 		 *	We will get here if there is no valid TID for the device
2745 		 * referenced in the scsi command packet.
2746 		 */
2747 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2748 		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2749 		debug_asr_cmd_printf (" B\n");
2750 		xpt_done(ccb);
2751 		break;
2752 	}
2753 
2754 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
2756 		asr_hbareset (sc);
2757 		ccb->ccb_h.status = CAM_REQ_CMP;
2758 		xpt_done(ccb);
2759 		break;
2760 
2761 #if (defined(REPORT_LUNS))
2762 	case REPORT_LUNS:
2763 #endif
2764 	case XPT_ABORT:			/* Abort the specified CCB */
2765 		/* XXX Implement */
2766 		ccb->ccb_h.status = CAM_REQ_INVALID;
2767 		xpt_done(ccb);
2768 		break;
2769 
2770 	case XPT_SET_TRAN_SETTINGS:
2771 		/* XXX Implement */
2772 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2773 		xpt_done(ccb);
2774 		break;
2775 
2776 	case XPT_GET_TRAN_SETTINGS:
2777 	/* Get default/user set transfer settings for the target */
2778 	{
2779 		struct	ccb_trans_settings *cts = &(ccb->cts);
2780 		struct ccb_trans_settings_scsi *scsi =
2781 		    &cts->proto_specific.scsi;
2782 		struct ccb_trans_settings_spi *spi =
2783 		    &cts->xport_specific.spi;
2784 
2785 		if (cts->type == CTS_TYPE_USER_SETTINGS) {
2786 			cts->protocol = PROTO_SCSI;
2787 			cts->protocol_version = SCSI_REV_2;
2788 			cts->transport = XPORT_SPI;
2789 			cts->transport_version = 2;
2790 
2791 			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2792 			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2793 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2794 			spi->sync_period = 6; /* 40MHz */
2795 			spi->sync_offset = 15;
2796 			spi->valid = CTS_SPI_VALID_SYNC_RATE
2797 				   | CTS_SPI_VALID_SYNC_OFFSET
2798 				   | CTS_SPI_VALID_BUS_WIDTH
2799 				   | CTS_SPI_VALID_DISC;
2800 			scsi->valid = CTS_SCSI_VALID_TQ;
2801 
2802 			ccb->ccb_h.status = CAM_REQ_CMP;
2803 		} else {
2804 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2805 		}
2806 		xpt_done(ccb);
2807 		break;
2808 	}
2809 
2810 	case XPT_CALC_GEOMETRY:
2811 	{
2812 		struct	  ccb_calc_geometry *ccg;
2813 		u_int32_t size_mb;
2814 		u_int32_t secs_per_cylinder;
2815 
2816 		ccg = &(ccb->ccg);
2817 		size_mb = ccg->volume_size
2818 			/ ((1024L * 1024L) / ccg->block_size);
2819 
2820 		if (size_mb > 4096) {
2821 			ccg->heads = 255;
2822 			ccg->secs_per_track = 63;
2823 		} else if (size_mb > 2048) {
2824 			ccg->heads = 128;
2825 			ccg->secs_per_track = 63;
2826 		} else if (size_mb > 1024) {
2827 			ccg->heads = 65;
2828 			ccg->secs_per_track = 63;
2829 		} else {
2830 			ccg->heads = 64;
2831 			ccg->secs_per_track = 32;
2832 		}
2833 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2834 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2835 		ccb->ccb_h.status = CAM_REQ_CMP;
2836 		xpt_done(ccb);
2837 		break;
2838 	}
2839 
2840 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2841 		ASR_resetBus (sc, cam_sim_bus(sim));
2842 		ccb->ccb_h.status = CAM_REQ_CMP;
2843 		xpt_done(ccb);
2844 		break;
2845 
2846 	case XPT_TERM_IO:		/* Terminate the I/O process */
2847 		/* XXX Implement */
2848 		ccb->ccb_h.status = CAM_REQ_INVALID;
2849 		xpt_done(ccb);
2850 		break;
2851 
2852 	case XPT_PATH_INQ:		/* Path routing inquiry */
2853 	{
2854 		struct ccb_pathinq *cpi = &(ccb->cpi);
2855 
2856 		cpi->version_num = 1; /* XXX??? */
2857 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2858 		cpi->target_sprt = 0;
2859 		/* Not necessary to reset bus, done by HDM initialization */
2860 		cpi->hba_misc = PIM_NOBUSRESET;
2861 		cpi->hba_eng_cnt = 0;
2862 		cpi->max_target = sc->ha_MaxId;
2863 		cpi->max_lun = sc->ha_MaxLun;
2864 		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2865 		cpi->bus_id = cam_sim_bus(sim);
2866 		cpi->base_transfer_speed = 3300;
2867 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2868 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2869 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2870 		cpi->unit_number = cam_sim_unit(sim);
2871 		cpi->ccb_h.status = CAM_REQ_CMP;
2872                 cpi->transport = XPORT_SPI;
2873                 cpi->transport_version = 2;
2874                 cpi->protocol = PROTO_SCSI;
2875                 cpi->protocol_version = SCSI_REV_2;
2876 		xpt_done(ccb);
2877 		break;
2878 	}
2879 	default:
2880 		ccb->ccb_h.status = CAM_REQ_INVALID;
2881 		xpt_done(ccb);
2882 		break;
2883 	}
2884 } /* asr_action */
2885 
2886 /*
2887  * Handle processing of current CCB as pointed to by the Status.
2888  */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	/*
	 * Drain the outbound FIFO: each entry is the offset of a completed
	 * reply frame relative to the bus address of the ha_Msgs DMA area.
	 * Returns non-zero iff at least one reply was consumed.
	 * NOTE(review): the loop keeps running while the status register has
	 * Mask_InterruptsDisabled set -- presumably that bit doubles as an
	 * "interrupt pending" indication on this hardware; confirm against
	 * the adapter documentation.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb			   *ccb;
		u_int				    dsc;
		U32				    ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/* An empty FIFO read is retried once before giving up. */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/*
		 * Rebase the adapter-relative offset from ha_Msgs_Phys onto
		 * the kernel mapping of the reply-frame area.
		 */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/*
		 * The adapter flagged the message as FAILed: recover the
		 * preserved original MFA, stamp a private error code into the
		 * reply, and hand the frame back to the adapter as a NOP.
		 */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE	Message;
			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
			U32			MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *  Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *  Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#endif
			/*
			 *  Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
				      sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *  Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 *	Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status code onto a CAM status. */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			/* CAM_AUTOSNS_VALID triggers the sense copy below. */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			/* Unrecognized status: ask CAM to requeue the request. */
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/*
		 * Residual = requested transfer length minus what the adapter
		 * reports it actually moved (left at 0 for zero-length I/O).
		 */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/*
				 * Clamp to the smallest of: the CCB sense
				 * buffer, the I2O frame's sense field, and
				 * the caller's requested sense_len.
				 */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				if (size < ccb->csio.sense_len) {
					ccb->csio.sense_resid =
					    ccb->csio.sense_len - size;
				} else {
					ccb->csio.sense_resid = 0;
				}
				bzero(&(ccb->csio.sense_data),
				    sizeof(ccb->csio.sense_data));
				bcopy(Reply->SenseData,
				      &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/*
		 * A CCB with a path is a normal CAM request; one without is
		 * an internal synchronous command whose issuer is sleeping
		 * on the CCB address (see ASR_queue_i), so wake it instead.
		 */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */
3085 
3086 #undef QueueSize	/* Grrrr */
3087 #undef SG_Size		/* Grrrr */
3088 
3089 /*
3090  *	Meant to be included at the bottom of asr.c !!!
3091  */
3092 
3093 /*
3094  *	Included here as hard coded. Done because other necessary include
3095  *	files utilize C++ comment structures which make them a nuisance to
3096  *	included here just to pick up these three typedefs.
3097  */
/* Opaque handle types used by the DPT OSD glue pulled in below. */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
#include	"dev/raid/asr/osd_unix.h"

/* Map a control-device node to its unit number via the minor number. */
#define	asr_unit(dev)	  minor(dev)

/* Non-zero while the control device is open (enforces single-open). */
static u_int8_t ASR_ctlr_held;
3108 
3109 static int
3110 asr_open(struct dev_open_args *ap)
3111 {
3112 	cdev_t dev = ap->a_head.a_dev;
3113 	int		 error;
3114 
3115 	if (dev->si_drv1 == NULL) {
3116 		return (ENODEV);
3117 	}
3118 	crit_enter();
3119 	if (ASR_ctlr_held) {
3120 		error = EBUSY;
3121 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3122 		++ASR_ctlr_held;
3123 	}
3124 	crit_exit();
3125 	return (error);
3126 } /* asr_open */
3127 
/*
 * Close the control device: drop the exclusive-open flag so it can be
 * reopened.  No other per-open state is kept.
 */
static int
asr_close(struct dev_close_args *ap)
{

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */
3135 
3136 
3137 /*-------------------------------------------------------------------------*/
3138 /*		      Function ASR_queue_i				   */
3139 /*-------------------------------------------------------------------------*/
3140 /* The Parameters Passed To This Function Are :				   */
3141 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3142 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3143 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3144 /*									   */
3145 /* This Function Will Take The User Request Packet And Convert It To An	   */
3146 /* I2O MSG And Send It Off To The Adapter.				   */
3147 /*									   */
3148 /* Return : 0 For OK, Error Code Otherwise				   */
3149 /*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;	/* caller's buffer */
		I2O_FLAGS_COUNT		   FlagsCount;	/* original SG flags */
		char			   KernelSpace[sizeof(long)];
						/* variable-length bounce buf */
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is reporting a fault code. */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	/*
	 * Only the fixed-size header is copied in at first; it is enough to
	 * learn the full frame size and the function code.
	 * NOTE(review): kmalloc(..., M_WAITOK) does not return NULL on
	 * DragonFly, so these NULL checks are dead but harmless -- confirm.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	/* Reply is a USER-space address; it is only a copyin/copyout target. */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		/*
		 * NOTE(review): sizeof(status) is the size of the POINTER,
		 * not of the EXEC_STATUS_GET reply structure, so only the
		 * first few bytes are copied out -- looks like a
		 * long-standing short copyout; confirm against the ioctl
		 * consumers before changing.
		 */
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Now pull in the WHOLE frame at its declared size. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	/* The caller pre-sets the reply frame's size field; honor it. */
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree(Message_Ptr, M_TEMP);
		/* note: error is 0 here; the errno in this message is stale */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Build a kernel reply frame pre-linked to the inbound message. */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	/* Straight structure copies; no byte-swap accessors needed here. */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		/* An SG-less SYS_TAB_SET is executed by the driver itself. */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	/*
	 * Each user buffer named by a simple SG element gets a bounce buffer
	 * (ioctlSgList_S) holding a kernel copy; the SG element is rewritten
	 * to point at the bounce buffer's physical address.
	 */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			/* Only simple-address SG elements are supported. */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			if ((elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			/* The element's "physical address" is really the
			 * caller's user-space pointer at this point. */
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					/*
					 * Grow the frame by one element:
					 * copy everything before sg, then
					 * re-copy from sg-1 so the completed
					 * element is duplicated into the new
					 * slot and the remainder shifts up.
					 */
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			/* Stop on error or after the element marked last. */
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release every bounce buffer collected. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			kfree(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree(elm, M_TEMP);
		}
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	/* asr_intr() wakes us via wakeup(ccb) since this CCB has no path. */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Detailed status 0 = success, 1 = any CAM failure. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		/*
		 * NOTE(review): this bzero()s the CCB sense buffer and then
		 * copies the now-zeroed buffer into Reply_Ptr->SenseData, so
		 * the user always receives zeroed sense bytes.  Compare
		 * asr_intr(), which copies Reply->SenseData *into* the CCB;
		 * the bzero/bcopy pair here looks inverted -- confirm before
		 * changing, since consumers may rely on current behavior.
		 */
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	kfree(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3596 
3597 /*----------------------------------------------------------------------*/
3598 /*			    Function asr_ioctl			       */
3599 /*----------------------------------------------------------------------*/
3600 /* The parameters passed to this function are :				*/
3601 /*     dev  : Device number.						*/
3602 /*     cmd  : Ioctl Command						*/
3603 /*     data : User Argument Passed In.					*/
3604 /*     flag : Mode Parameter						*/
3605 /*     proc : Process Parameter						*/
3606 /*									*/
3607 /* This function is the user interface into this adapter driver		*/
3608 /*									*/
3609 /* Return : zero if OK, error code if not				*/
3610 /*----------------------------------------------------------------------*/
3611 
3612 static int
3613 asr_ioctl(struct dev_ioctl_args *ap)
3614 {
3615 	cdev_t dev = ap->a_head.a_dev;
3616 	u_long cmd = ap->a_cmd;
3617 	caddr_t data = ap->a_data;
3618 	Asr_softc_t	*sc = dev->si_drv1;
3619 	int		i, error = 0;
3620 #ifdef ASR_IOCTL_COMPAT
3621 	int		j;
3622 #endif /* ASR_IOCTL_COMPAT */
3623 
3624 	if (sc != NULL)
3625 	switch(cmd) {
3626 
3627 	case DPT_SIGNATURE:
3628 #ifdef ASR_IOCTL_COMPAT
3629 #if (dsDescription_size != 50)
3630 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3631 #endif
3632 		if (cmd & 0xFFFF0000) {
3633 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3634 			return (0);
3635 		}
3636 	/* Traditional version of the ioctl interface */
3637 	case DPT_SIGNATURE & 0x0000FFFF:
3638 #endif
3639 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3640 				sizeof(dpt_sig_S)));
3641 
3642 	/* Traditional version of the ioctl interface */
3643 	case DPT_CTRLINFO & 0x0000FFFF:
3644 	case DPT_CTRLINFO: {
3645 		struct {
3646 			u_int16_t length;
3647 			u_int16_t drvrHBAnum;
3648 			u_int32_t baseAddr;
3649 			u_int16_t blinkState;
3650 			u_int8_t  pciBusNum;
3651 			u_int8_t  pciDeviceNum;
3652 			u_int16_t hbaFlags;
3653 			u_int16_t Interrupt;
3654 			u_int32_t reserved1;
3655 			u_int32_t reserved2;
3656 			u_int32_t reserved3;
3657 		} CtlrInfo;
3658 
3659 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3660 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3661 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3662 		CtlrInfo.baseAddr = sc->ha_Base;
3663 		i = ASR_getBlinkLedCode (sc);
3664 		if (i == -1)
3665 			i = 0;
3666 
3667 		CtlrInfo.blinkState = i;
3668 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3669 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3670 #define	FLG_OSD_PCI_VALID 0x0001
3671 #define	FLG_OSD_DMA	  0x0002
3672 #define	FLG_OSD_I2O	  0x0004
3673 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3674 		CtlrInfo.Interrupt = sc->ha_irq;
3675 #ifdef ASR_IOCTL_COMPAT
3676 		if (cmd & 0xffff0000)
3677 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3678 		else
3679 #endif /* ASR_IOCTL_COMPAT */
3680 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3681 	}	return (error);
3682 
3683 	/* Traditional version of the ioctl interface */
3684 	case DPT_SYSINFO & 0x0000FFFF:
3685 	case DPT_SYSINFO: {
3686 		sysInfo_S	Info;
3687 #ifdef ASR_IOCTL_COMPAT
3688 		char	      * cp;
3689 		/* Kernel Specific ptok `hack' */
3690 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3691 
3692 		bzero(&Info, sizeof(Info));
3693 
3694 		/* Appears I am the only person in the Kernel doing this */
3695 		outb (0x70, 0x12);
3696 		i = inb(0x71);
3697 		j = i >> 4;
3698 		if (i == 0x0f) {
3699 			outb (0x70, 0x19);
3700 			j = inb (0x71);
3701 		}
3702 		Info.drive0CMOS = j;
3703 
3704 		j = i & 0x0f;
3705 		if (i == 0x0f) {
3706 			outb (0x70, 0x1a);
3707 			j = inb (0x71);
3708 		}
3709 		Info.drive1CMOS = j;
3710 
3711 		Info.numDrives = *((char *)ptok(0x475));
3712 #else /* ASR_IOCTL_COMPAT */
3713 		bzero(&Info, sizeof(Info));
3714 #endif /* ASR_IOCTL_COMPAT */
3715 
3716 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3717 #if defined(__i386__)
3718 		switch (cpu) {
3719 		case CPU_386SX: case CPU_386:
3720 			Info.processorType = PROC_386; break;
3721 		case CPU_486SX: case CPU_486:
3722 			Info.processorType = PROC_486; break;
3723 		case CPU_586:
3724 			Info.processorType = PROC_PENTIUM; break;
3725 		case CPU_686:
3726 			Info.processorType = PROC_SEXIUM; break;
3727 		}
3728 #endif
3729 
3730 		Info.osType = OS_BSDI_UNIX;
3731 		Info.osMajorVersion = osrelease[0] - '0';
3732 		Info.osMinorVersion = osrelease[2] - '0';
3733 		/* Info.osRevision = 0; */
3734 		/* Info.osSubRevision = 0; */
3735 		Info.busType = SI_PCI_BUS;
3736 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3737 
3738 #ifdef ASR_IOCTL_COMPAT
3739 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3740 		/* Go Out And Look For I2O SmartROM */
3741 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3742 			int k;
3743 
3744 			cp = ptok(j);
3745 			if (*((unsigned short *)cp) != 0xAA55) {
3746 				continue;
3747 			}
3748 			j += (cp[2] * 512) - 2048;
3749 			if ((*((u_long *)(cp + 6))
3750 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3751 			 || (*((u_long *)(cp + 10))
3752 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3753 				continue;
3754 			}
3755 			cp += 0x24;
3756 			for (k = 0; k < 64; ++k) {
3757 				if (*((unsigned short *)cp)
3758 				 == (' ' + ('v' * 256))) {
3759 					break;
3760 				}
3761 			}
3762 			if (k < 64) {
3763 				Info.smartROMMajorVersion
3764 				    = *((unsigned char *)(cp += 4)) - '0';
3765 				Info.smartROMMinorVersion
3766 				    = *((unsigned char *)(cp += 2));
3767 				Info.smartROMRevision
3768 				    = *((unsigned char *)(++cp));
3769 				Info.flags |= SI_SmartROMverValid;
3770 				Info.flags &= ~SI_NO_SmartROM;
3771 				break;
3772 			}
3773 		}
3774 		/* Get The Conventional Memory Size From CMOS */
3775 		outb (0x70, 0x16);
3776 		j = inb (0x71);
3777 		j <<= 8;
3778 		outb (0x70, 0x15);
3779 		j |= inb(0x71);
3780 		Info.conventionalMemSize = j;
3781 
3782 		/* Get The Extended Memory Found At Power On From CMOS */
3783 		outb (0x70, 0x31);
3784 		j = inb (0x71);
3785 		j <<= 8;
3786 		outb (0x70, 0x30);
3787 		j |= inb(0x71);
3788 		Info.extendedMemSize = j;
3789 		Info.flags |= SI_MemorySizeValid;
3790 
3791 		/* Copy Out The Info Structure To The User */
3792 		if (cmd & 0xFFFF0000)
3793 			bcopy(&Info, data, sizeof(Info));
3794 		else
3795 #endif /* ASR_IOCTL_COMPAT */
3796 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3797 		return (error); }
3798 
3799 		/* Get The BlinkLED State */
3800 	case DPT_BLINKLED:
3801 		i = ASR_getBlinkLedCode (sc);
3802 		if (i == -1)
3803 			i = 0;
3804 #ifdef ASR_IOCTL_COMPAT
3805 		if (cmd & 0xffff0000)
3806 			bcopy(&i, data, sizeof(i));
3807 		else
3808 #endif /* ASR_IOCTL_COMPAT */
3809 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3810 		break;
3811 
3812 		/* Send an I2O command */
3813 	case I2OUSRCMD:
3814 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3815 
3816 		/* Reset and re-initialize the adapter */
3817 	case I2ORESETCMD:
3818 		return (ASR_reset(sc));
3819 
3820 		/* Rescan the LCT table and resynchronize the information */
3821 	case I2ORESCANCMD:
3822 		return (ASR_rescan(sc));
3823 	}
3824 	return (EINVAL);
3825 } /* asr_ioctl */
3826