xref: /dflybsd-src/sys/dev/raid/asr/asr.c (revision 2a53016d85f1096c9234a62db3a55aebc5227f1c)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/cdefs.h>
115 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
116 #include <sys/kernel.h>
117 #include <sys/module.h>
118 #include <sys/systm.h>
119 #include <sys/malloc.h>
120 #include <sys/conf.h>
121 #include <sys/ioccom.h>
122 #include <sys/priv.h>
123 #include <sys/proc.h>
124 #include <sys/bus.h>
125 #include <sys/rman.h>
126 #include <sys/stat.h>
127 #include <sys/device.h>
128 #include <sys/thread2.h>
129 #include <sys/bus_dma.h>
130 
131 #include <bus/cam/cam.h>
132 #include <bus/cam/cam_ccb.h>
133 #include <bus/cam/cam_sim.h>
134 #include <bus/cam/cam_xpt_sim.h>
135 
136 #include <bus/cam/scsi/scsi_all.h>
137 #include <bus/cam/scsi/scsi_message.h>
138 
139 #include <vm/vm.h>
140 #include <vm/pmap.h>
141 
142 #if defined(__i386__)
143 #include "opt_asr.h"
144 #include <machine/cputypes.h>
145 
146 #if defined(ASR_COMPAT)
147 #define ASR_IOCTL_COMPAT
148 #endif /* ASR_COMPAT */
149 #endif
150 #include <machine/vmparam.h>
151 
152 #include <bus/pci/pcivar.h>
153 #include <bus/pci/pcireg.h>
154 
155 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
156 #define	KVTOPHYS(x) vtophys(x)
157 #include	<dev/raid/asr/dptalign.h>
158 #include	<dev/raid/asr/i2oexec.h>
159 #include	<dev/raid/asr/i2obscsi.h>
160 #include	<dev/raid/asr/i2odpt.h>
161 #include	<dev/raid/asr/i2oadptr.h>
162 
163 #include	<dev/raid/asr/sys_info.h>
164 
165 #define	ASR_VERSION	1
166 #define	ASR_REVISION	'1'
167 #define	ASR_SUBREVISION '0'
168 #define	ASR_MONTH	5
169 #define	ASR_DAY		5
170 #define	ASR_YEAR	(2004 - 1980)
171 
172 /*
173  *	Debug macros to reduce the unsightly ifdefs
174  */
#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
/*
 *	Hex-dump an I2O message frame for debugging, eight 32-bit words
 *	per output line.  The length is taken from the frame's own
 *	MessageSize field (expressed in 32-bit words).
 *	NOTE(review): a frame reporting MessageSize == 0 would make
 *	`length--' wrap around and dump ~4G words — presumably frames are
 *	always non-empty here; confirm before relying on this in new code.
 */
static __inline void
debug_asr_message(PI2O_MESSAGE_FRAME message)
{
	u_int32_t * pointer = (u_int32_t *)message;
	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
	u_int32_t   counter = 0;

	while (length--) {
		kprintf("%08lx%c", (u_long)*(pointer++),
		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
	}
}
#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
189 
190 #ifdef DEBUG_ASR
191   /* Breaks on none STDC based compilers :-( */
192 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
193 #define debug_asr_dump_message(message)	debug_asr_message(message)
194 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
195 #else /* DEBUG_ASR */
196 #define debug_asr_printf(fmt,args...)
197 #define debug_asr_dump_message(message)
198 #define debug_asr_print_path(ccb)
199 #endif /* DEBUG_ASR */
200 
201 /*
202  *	If DEBUG_ASR_CMD is defined:
203  *		0 - Display incoming SCSI commands
204  *		1 - add in a quick character before queueing.
205  *		2 - add in outgoing message frames.
206  */
207 #if (defined(DEBUG_ASR_CMD))
208 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
/*
 *	Print every CDB byte of a SCSI I/O ccb in hex (debug builds only).
 *	NOTE(review): this dumps the cdb_io union in place; for CDBs passed
 *	by pointer (CAM_CDB_POINTER) it would print the pointer bytes, not
 *	the CDB itself — confirm callers only use in-line CDBs.
 */
static __inline void
debug_asr_dump_ccb(union ccb *ccb)
{
	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
	int		len = ccb->csio.cdb_len;

	while (len) {
		debug_asr_cmd_printf (" %02x", *(cp++));
		--len;
	}
}
220 #if (DEBUG_ASR_CMD > 0)
221 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
222 #else
223 #define debug_asr_cmd1_printf(fmt,args...)
224 #endif
225 #if (DEBUG_ASR_CMD > 1)
226 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
227 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
228 #else
229 #define debug_asr_cmd2_printf(fmt,args...)
230 #define debug_asr_cmd2_dump_message(message)
231 #endif
232 #else /* DEBUG_ASR_CMD */
233 #define debug_asr_cmd_printf(fmt,args...)
234 #define debug_asr_dump_ccb(ccb)
235 #define debug_asr_cmd1_printf(fmt,args...)
236 #define debug_asr_cmd2_printf(fmt,args...)
237 #define debug_asr_cmd2_dump_message(message)
238 #endif /* DEBUG_ASR_CMD */
239 
240 #if (defined(DEBUG_ASR_USR_CMD))
241 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
242 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
243 #else /* DEBUG_ASR_USR_CMD */
244 #define debug_usr_cmd_printf(fmt,args...)
245 #define debug_usr_cmd_dump_message(message)
246 #endif /* DEBUG_ASR_USR_CMD */
247 
248 #ifdef ASR_IOCTL_COMPAT
249 #define	dsDescription_size 46	/* Snug as a bug in a rug */
250 #endif /* ASR_IOCTL_COMPAT */
251 
252 #include "dev/raid/asr/dptsig.h"
253 
254 static dpt_sig_S ASR_sig = {
255 	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
256 	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
257 	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
258 	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
259 	ASR_MONTH, ASR_DAY, ASR_YEAR,
260 /*	 01234567890123456789012345678901234567890123456789	< 50 chars */
261 	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
262 	/*		 ^^^^^ asr_attach alters these to match OS */
263 };
264 
265 /* Configuration Definitions */
266 
267 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
268 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
269 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
270 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
271 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
272 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
273 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
274 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
275 				/* Also serves as the minimum map for	 */
276 				/* the 2005S zero channel RAID product	 */
277 
278 /* I2O register set */
279 #define	I2O_REG_STATUS		0x30
280 #define	I2O_REG_MASK		0x34
281 #define	I2O_REG_TOFIFO		0x40
282 #define	I2O_REG_FROMFIFO	0x44
283 
284 #define	Mask_InterruptsDisabled	0x08
285 
286 /*
287  * A MIX of performance and space considerations for TID lookups
288  */
typedef u_int16_t tid_t;	/* I2O Target ID (device handle on the IOP) */

/* Per-target map of LUN -> TID.  TID[] is declared with one element but
 * is allocated oversize by ASR_getTidAddress; `size' is the usable count. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;

/* Per-bus map of target -> lun2tid_t.  LUN[] is likewise allocated
 * oversize; NULL entries mean "never looked up". */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
300 
301 /*
302  *	To ensure that we only allocate and use the worst case ccb here, lets
303  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
304  *	ccb type, ensure that you add the additional structures into our local
305  *	ccb union. To ensure strict type checking, we will utilize the local
306  *	ccb definition wherever possible.
307  */
308 union asr_ccb {
309 	struct ccb_hdr	    ccb_h;  /* For convenience */
310 	struct ccb_scsiio   csio;
311 	struct ccb_setasync csa;
312 };
313 
314 struct Asr_status_mem {
315 	I2O_EXEC_STATUS_GET_REPLY	status;
316 	U32				rstatus;
317 };
318 
319 /**************************************************************************
320 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321 **  Is Configured Into The System.  The Structure Supplies Configuration **
322 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
323 ***************************************************************************/
324 
typedef struct Asr_softc {
	device_t		ha_dev;
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* offset of blink-LED code */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message-frame window */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	/* DMA resources for the status/reply scratch area */
	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;
	u_int32_t		ha_rstatus_phys; /* phys addr of rstatus */
	u_int32_t		ha_status_phys;  /* phys addr of status */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthand accessors into an LCT entry's IdentityTag bytes */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter state, below */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
	struct cdev *ha_devt;
} Asr_softc_t;
384 
385 static Asr_softc_t *Asr_softc_list;
386 
387 /*
388  *	Prototypes of the routines we have in this object.
389  */
390 
391 /* I2O HDM interface */
392 static int	asr_probe(device_t dev);
393 static int	asr_attach(device_t dev);
394 
395 static d_ioctl_t asr_ioctl;
396 static d_open_t asr_open;
397 static d_close_t asr_close;
398 static int	asr_intr(Asr_softc_t *sc);
399 static void	asr_timeout(void *arg);
400 static int	ASR_init(Asr_softc_t *sc);
401 static int	ASR_acquireLct(Asr_softc_t *sc);
402 static int	ASR_acquireHrt(Asr_softc_t *sc);
403 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
404 static void	asr_poll(struct cam_sim *sim);
405 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
406 
407 /*
408  *	Here is the auto-probe structure used to nest our tests appropriately
409  *	during the startup phase of the operating system.
410  */
411 static device_method_t asr_methods[] = {
412 	DEVMETHOD(device_probe,	 asr_probe),
413 	DEVMETHOD(device_attach, asr_attach),
414 	{ 0, 0 }
415 };
416 
417 static driver_t asr_driver = {
418 	"asr",
419 	asr_methods,
420 	sizeof(Asr_softc_t)
421 };
422 
423 static devclass_t asr_devclass;
424 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
425 MODULE_VERSION(asr, 1);
426 MODULE_DEPEND(asr, pci, 1, 1, 1);
427 MODULE_DEPEND(asr, cam, 1, 1, 1);
428 
429 /*
430  * devsw for asr hba driver
431  *
432  * only ioctl is used. the sd driver provides all other access.
433  */
434 static struct dev_ops asr_ops = {
435 	{ "asr", 0, 0 },
436 	.d_open =	asr_open,
437 	.d_close =	asr_close,
438 	.d_ioctl =	asr_ioctl,
439 };
440 
441 /* I2O support routines */
442 
/* Pop a reply-frame offset from the outbound (From) FIFO. */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}

/* Pop a free inbound-frame offset from the inbound (To) FIFO. */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}

/* Read the interrupt mask register. */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}

/* Read the adapter status register. */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}

/* Return a consumed reply frame to the outbound (From) FIFO. */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
			  val);
}

/* Post a filled inbound frame to the inbound (To) FIFO. */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
			  val);
}

/* Write the interrupt mask register. */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
			  val);
}

/* Copy `len' 32-bit words of a message into the adapter frame window. */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}
498 
499 /*
500  *	Fill message with default.
501  */
502 static PI2O_MESSAGE_FRAME
503 ASR_fillMessage(void *Message, u_int16_t size)
504 {
505 	PI2O_MESSAGE_FRAME Message_Ptr;
506 
507 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
508 	bzero(Message_Ptr, size);
509 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
510 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
511 	  (size + sizeof(U32) - 1) >> 2);
512 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
513 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
514 	return (Message_Ptr);
515 } /* ASR_fillMessage */
516 
517 #define	EMPTY_QUEUE (0xffffffff)
518 
519 static __inline U32
520 ASR_getMessage(Asr_softc_t *sc)
521 {
522 	U32	MessageOffset;
523 
524 	MessageOffset = asr_get_ToFIFO(sc);
525 	if (MessageOffset == EMPTY_QUEUE)
526 		MessageOffset = asr_get_ToFIFO(sc);
527 
528 	return (MessageOffset);
529 } /* ASR_getMessage */
530 
/*
 *	Issue a polled (synchronous) command: claim a free inbound frame
 *	(polling up to ~15s in 10ms steps), copy the message into it,
 *	mask adapter interrupts and post the frame to the ToFIFO.
 *	Returns the previous interrupt mask so the caller can restore it
 *	with asr_set_intr(), or 0xffffffff if no frame became available.
 */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;	/* sentinel: "no frame obtained" */
	U32	MessageOffset;
	u_int	Delay = 1500;		/* 1500 * 10ms polls */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the message into the adapter-resident frame. */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	return (Mask);
} /* ASR_initiateCp */
560 
561 /*
562  *	Reset the adapter.
563  */
564 static U32
565 ASR_resetIOP(Asr_softc_t *sc)
566 {
567 	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
568 	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
569 	U32			       * Reply_Ptr;
570 	U32				 Old;
571 
572 	/*
573 	 *  Build up our copy of the Message.
574 	 */
575 	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
576 	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
577 	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
578 	/*
579 	 *  Reset the Reply Status
580 	 */
581 	Reply_Ptr = &sc->ha_statusmem->rstatus;
582 	*Reply_Ptr = 0;
583 	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
584 	    sc->ha_rstatus_phys);
585 	/*
586 	 *	Send the Message out
587 	 */
588 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
589 	     0xffffffff) {
590 		/*
591 		 * Wait for a response (Poll), timeouts are dangerous if
592 		 * the card is truly responsive. We assume response in 2s.
593 		 */
594 		u_int8_t Delay = 200;
595 
596 		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
597 			DELAY (10000);
598 		}
599 		/*
600 		 *	Re-enable the interrupts.
601 		 */
602 		asr_set_intr(sc, Old);
603 		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
604 		return(*Reply_Ptr);
605 	}
606 	KASSERT(Old != 0xffffffff, ("Old == -1"));
607 	return (0);
608 } /* ASR_resetIOP */
609 
610 /*
611  *	Get the curent state of the adapter
612  */
613 static PI2O_EXEC_STATUS_GET_REPLY
614 ASR_getStatus(Asr_softc_t *sc)
615 {
616 	I2O_EXEC_STATUS_GET_MESSAGE	Message;
617 	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
618 	PI2O_EXEC_STATUS_GET_REPLY	buffer;
619 	U32				Old;
620 
621 	/*
622 	 *  Build up our copy of the Message.
623 	 */
624 	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
625 	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
626 	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
627 	    I2O_EXEC_STATUS_GET);
628 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
629 	    sc->ha_status_phys);
630 	/* This one is a Byte Count */
631 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
632 	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
633 	/*
634 	 *  Reset the Reply Status
635 	 */
636 	buffer = &sc->ha_statusmem->status;
637 	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
638 	/*
639 	 *	Send the Message out
640 	 */
641 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
642 	    0xffffffff) {
643 		/*
644 		 *	Wait for a response (Poll), timeouts are dangerous if
645 		 * the card is truly responsive. We assume response in 50ms.
646 		 */
647 		u_int8_t Delay = 255;
648 
649 		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
650 			if (--Delay == 0) {
651 				buffer = NULL;
652 				break;
653 			}
654 			DELAY (1000);
655 		}
656 		/*
657 		 *	Re-enable the interrupts.
658 		 */
659 		asr_set_intr(sc, Old);
660 		return (buffer);
661 	}
662 	return (NULL);
663 } /* ASR_getStatus */
664 
665 /*
666  *	Check if the device is a SCSI I2O HBA, and add it to the list.
667  */
668 
669 /*
670  * Probe for ASR controller.  If we find it, we will use it.
671  * virtual adapters.
672  */
673 static int
674 asr_probe(device_t dev)
675 {
676 	u_int32_t id;
677 
678 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
679 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
680 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
681 		return (BUS_PROBE_DEFAULT);
682 	}
683 	return (ENXIO);
684 } /* asr_probe */
685 
686 static __inline union asr_ccb *
687 asr_alloc_ccb(Asr_softc_t *sc)
688 {
689 	union asr_ccb *new_ccb;
690 
691 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
692 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
693 		new_ccb->ccb_h.pinfo.priority = 1;
694 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
695 		new_ccb->ccb_h.spriv_ptr0 = sc;
696 	}
697 	return (new_ccb);
698 } /* asr_alloc_ccb */
699 
/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	kfree(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
705 
706 /*
707  *	Print inquiry data `carefully'
708  */
709 static void
710 ASR_prstring(u_int8_t *s, int len)
711 {
712 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
713 		kprintf ("%c", *(s++));
714 	}
715 } /* ASR_prstring */
716 
717 /*
718  *	Send a message synchronously and without Interrupt to a ccb.
719  */
720 static int
721 ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
722 {
723 	U32		Mask;
724 	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
725 
726 	/*
727 	 * We do not need any (optional byteswapping) method access to
728 	 * the Initiator context field.
729 	 */
730 	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
731 
732 	/* Prevent interrupt service */
733 	crit_enter();
734 	Mask = asr_get_intr(sc);
735 	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
736 
737 	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
738 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
739 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
740 	}
741 
742 	/*
743 	 * Wait for this board to report a finished instruction.
744 	 */
745 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
746 		(void)asr_intr (sc);
747 	}
748 
749 	/* Re-enable Interrupts */
750 	asr_set_intr(sc, Mask);
751 	crit_exit();
752 
753 	return (ccb->ccb_h.status);
754 } /* ASR_queue_s */
755 
756 /*
757  *	Send a message synchronously to an Asr_softc_t.
758  */
759 static int
760 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
761 {
762 	union asr_ccb	*ccb;
763 	int		status;
764 
765 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
766 		return (CAM_REQUEUE_REQ);
767 	}
768 
769 	status = ASR_queue_s (ccb, Message);
770 
771 	asr_free_ccb(ccb);
772 
773 	return (status);
774 } /* ASR_queue_c */
775 
776 /*
777  *	Add the specified ccb to the active queue
778  */
779 static __inline void
780 ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
781 {
782 	crit_enter();
783 	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
784 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
785 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
786 			/*
787 			 * RAID systems can take considerable time to
788 			 * complete some commands given the large cache
789 			 * flashes switching from write back to write thru.
790 			 */
791 			ccb->ccb_h.timeout = 6 * 60 * 1000;
792 		}
793 		callout_reset(&ccb->ccb_h.timeout_ch,
794 		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
795 	}
796 	crit_exit();
797 } /* ASR_ccbAdd */
798 
799 /*
800  *	Remove the specified ccb from the active queue.
801  */
802 static __inline void
803 ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
804 {
805 	crit_enter();
806 	callout_stop(&ccb->ccb_h.timeout_ch);
807 	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
808 	crit_exit();
809 } /* ASR_ccbRemove */
810 
811 /*
812  *	Fail all the active commands, so they get re-issued by the operating
813  *	system.
814  */
815 static void
816 ASR_failActiveCommands(Asr_softc_t *sc)
817 {
818 	struct ccb_hdr	*ccb;
819 
820 	crit_enter();
821 	/*
822 	 *	We do not need to inform the CAM layer that we had a bus
823 	 * reset since we manage it on our own, this also prevents the
824 	 * SCSI_DELAY settling that would be required on other systems.
825 	 * The `SCSI_DELAY' has already been handled by the card via the
826 	 * acquisition of the LCT table while we are at CAM priority level.
827 	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
828 	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
829 	 *  }
830 	 */
831 	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
832 		ASR_ccbRemove (sc, (union asr_ccb *)ccb);
833 
834 		ccb->status &= ~CAM_STATUS_MASK;
835 		ccb->status |= CAM_REQUEUE_REQ;
836 		/* Nothing Transfered */
837 		((struct ccb_scsiio *)ccb)->resid
838 		  = ((struct ccb_scsiio *)ccb)->dxfer_len;
839 
840 		if (ccb->path) {
841 			xpt_done ((union ccb *)ccb);
842 		} else {
843 			wakeup (ccb);
844 		}
845 	}
846 	crit_exit();
847 } /* ASR_failActiveCommands */
848 
849 /*
850  *	The following command causes the HBA to reset the specific bus
851  */
852 static void
853 ASR_resetBus(Asr_softc_t *sc, int bus)
854 {
855 	I2O_HBA_BUS_RESET_MESSAGE	Message;
856 	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
857 	PI2O_LCT_ENTRY			Device;
858 
859 	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
860 	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
861 	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
862 	  I2O_HBA_BUS_RESET);
863 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
864 	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
865 	  ++Device) {
866 		if (((Device->le_type & I2O_PORT) != 0)
867 		 && (Device->le_bus == bus)) {
868 			I2O_MESSAGE_FRAME_setTargetAddress(
869 			  &Message_Ptr->StdMessageFrame,
870 			  I2O_LCT_ENTRY_getLocalTID(Device));
871 			/* Asynchronous command, with no expectations */
872 			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
873 			break;
874 		}
875 	}
876 } /* ASR_resetBus */
877 
878 static __inline int
879 ASR_getBlinkLedCode(Asr_softc_t *sc)
880 {
881 	U8	blink;
882 
883 	if (sc == NULL)
884 		return (0);
885 
886 	blink = bus_space_read_1(sc->ha_frame_btag,
887 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
888 	if (blink != 0xBC)
889 		return (0);
890 
891 	blink = bus_space_read_1(sc->ha_frame_btag,
892 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
893 	return (blink);
894 } /* ASR_getBlinkCode */
895 
896 /*
897  *	Determine the address of an TID lookup. Must be done at high priority
898  *	since the address can be changed by other threads of execution.
899  *
900  *	Returns NULL pointer if not indexible (but will attempt to generate
901  *	an index if `new_entry' flag is set to TRUE).
902  *
903  *	All addressible entries are to be guaranteed zero if never initialized.
904  */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;	/* per-bus table: target -> lun list */
	lun2tid_t	*target_ptr;	/* per-target table: lun -> TID */
	unsigned	new_size;

	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	/* Round the target index up to the next BUS_CHUNK boundary. */
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 *
		 *	NOTE(review): kmalloc() with M_WAITOK does not fail,
		 * so the == NULL arm below appears to be dead but harmless.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		/* Capacity is the one inline element plus new_size extras. */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	/* lun 0 fits in the single inline element; otherwise round up. */
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1034 
1035 /*
1036  *	Get a pre-existing TID relationship.
1037  *
1038  *	If the TID was never set, return (tid_t)-1.
1039  *
1040  *	should use mutex rather than spl.
1041  */
1042 static __inline tid_t
1043 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1044 {
1045 	tid_t	*tid_ptr;
1046 	tid_t	retval;
1047 
1048 	crit_enter();
1049 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1050 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1051 	 || (*tid_ptr == (tid_t)0)) {
1052 		crit_exit();
1053 		return ((tid_t)-1);
1054 	}
1055 	retval = *tid_ptr;
1056 	crit_exit();
1057 	return (retval);
1058 } /* ASR_getTid */
1059 
1060 /*
1061  *	Set a TID relationship.
1062  *
1063  *	If the TID was not set, return (tid_t)-1.
1064  *
1065  *	should use mutex rather than spl.
1066  */
1067 static __inline tid_t
1068 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1069 {
1070 	tid_t	*tid_ptr;
1071 
1072 	if (TID != (tid_t)-1) {
1073 		if (TID == 0) {
1074 			return ((tid_t)-1);
1075 		}
1076 		crit_enter();
1077 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1078 		 == NULL) {
1079 			crit_exit();
1080 			return ((tid_t)-1);
1081 		}
1082 		*tid_ptr = TID;
1083 		crit_exit();
1084 	}
1085 	return (TID);
1086 } /* ASR_setTid */
1087 
1088 /*-------------------------------------------------------------------------*/
1089 /*		      Function ASR_rescan				   */
1090 /*-------------------------------------------------------------------------*/
1091 /* The Parameters Passed To This Function Are :				   */
1092 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1093 /*									   */
1094 /* This Function Will rescan the adapter and resynchronize any data	   */
1095 /*									   */
1096 /* Return : 0 For OK, Error Code Otherwise				   */
1097 /*-------------------------------------------------------------------------*/
1098 
static int
ASR_rescan(Asr_softc_t *sc)
{
	int bus;
	int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;	/* events deferred to a bus-wide async */

		/*
		 *	Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t	       TID = (tid_t)-1;
				tid_t	       LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If we cannot build a device path,
					 * accumulate the event and deliver it
					 * bus-wide after the scan.
					 *
					 * NOTE(review): `path' does not appear
					 * to be released after use here;
					 * verify against the CAM API whether
					 * xpt_free_path() is required.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* device vanished */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* device newly found */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* TID changed */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 *	We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 *	The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1228 
1229 /*-------------------------------------------------------------------------*/
1230 /*		      Function ASR_reset				   */
1231 /*-------------------------------------------------------------------------*/
1232 /* The Parameters Passed To This Function Are :				   */
1233 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1234 /*									   */
1235 /* This Function Will reset the adapter and resynchronize any data	   */
1236 /*									   */
1237 /* Return : None							   */
1238 /*-------------------------------------------------------------------------*/
1239 
1240 static int
1241 ASR_reset(Asr_softc_t *sc)
1242 {
1243 	int retVal;
1244 
1245 	crit_enter();
1246 	if ((sc->ha_in_reset == HA_IN_RESET)
1247 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1248 		crit_exit();
1249 		return (EBUSY);
1250 	}
1251 	/*
1252 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1253 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1254 	 */
1255 	++(sc->ha_in_reset);
1256 	if (ASR_resetIOP(sc) == 0) {
1257 		debug_asr_printf ("ASR_resetIOP failed\n");
1258 		/*
1259 		 *	We really need to take this card off-line, easier said
1260 		 * than make sense. Better to keep retrying for now since if a
1261 		 * UART cable is connected the blinkLEDs the adapter is now in
1262 		 * a hard state requiring action from the monitor commands to
1263 		 * the HBA to continue. For debugging waiting forever is a
1264 		 * good thing. In a production system, however, one may wish
1265 		 * to instead take the card off-line ...
1266 		 */
1267 		/* Wait Forever */
1268 		while (ASR_resetIOP(sc) == 0);
1269 	}
1270 	retVal = ASR_init (sc);
1271 	crit_exit();
1272 	if (retVal != 0) {
1273 		debug_asr_printf ("ASR_init failed\n");
1274 		sc->ha_in_reset = HA_OFF_LINE;
1275 		return (ENXIO);
1276 	}
1277 	if (ASR_rescan (sc) != 0) {
1278 		debug_asr_printf ("ASR_rescan failed\n");
1279 	}
1280 	ASR_failActiveCommands (sc);
1281 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1282 		kprintf ("asr%d: Brining adapter back on-line\n",
1283 		  sc->ha_path[0]
1284 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1285 		    : 0);
1286 	}
1287 	sc->ha_in_reset = HA_OPERATIONAL;
1288 	return (0);
1289 } /* ASR_reset */
1290 
1291 /*
1292  *	Device timeout handler.
1293  */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;	/* blink-LED code, if any */

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on this ccb: escalate to adapter reset. */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; re-arm and retry from here later. */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	/* Re-arm before resetting so a stuck command re-enters (escalates). */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1344 
1345 /*
1346  * send a message asynchronously
1347  */
1348 static int
1349 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1350 {
1351 	U32		MessageOffset;
1352 	union asr_ccb	*ccb;
1353 
1354 	debug_asr_printf("Host Command Dump:\n");
1355 	debug_asr_dump_message(Message);
1356 
1357 	ccb = (union asr_ccb *)(long)
1358 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1359 
1360 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1361 		asr_set_frame(sc, Message, MessageOffset,
1362 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1363 		if (ccb) {
1364 			ASR_ccbAdd (sc, ccb);
1365 		}
1366 		/* Post the command */
1367 		asr_set_ToFIFO(sc, MessageOffset);
1368 	} else {
1369 		if (ASR_getBlinkLedCode(sc)) {
1370 			/*
1371 			 *	Unlikely we can do anything if we can't grab a
1372 			 * message frame :-(, but lets give it a try.
1373 			 */
1374 			(void)ASR_reset(sc);
1375 		}
1376 	}
1377 	return (MessageOffset);
1378 } /* ASR_queue */
1379 
1380 
/*
 * Simple Scatter Gather elements.
 *
 * SG(SGL, Index, Flags, Buffer, Size) fills in entry `Index' of the simple
 * SG list `SGL': sets the byte count to `Size', ORs `Flags' into the
 * mandatory I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT flag, and stores the
 * physical address of `Buffer' (0 if `Buffer' is NULL).
 *
 * NOTE: `Buffer' is evaluated twice; do not pass an expression with
 * side effects.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1392 
1393 /*
1394  *	Retrieve Parameter Group.
1395  */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	/* Build a UtilParamsGet frame with room for two simple SG elements. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation requesting all fields of `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 *  Element 0 carries the operations list out; element 1 receives
	 *  the results into the caller-supplied Buffer.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Send synchronously; on success return a pointer into Buffer. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1459 
1460 /*
1461  *	Acquire the LCT information.
1462  */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;	/* probe copy, size only */
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for. This first pass targets the small stack
	 * copy `Table' only to learn the required size.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list, one
	 * simple element per physically contiguous run, growing the
	 * message frame for each extra element needed.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* final element: mark end of the SG list */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				kfree(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				kfree(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Move the frame built so far; keep sg pointing at
			 * the same offset inside the new, larger copy. */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			kfree(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	/* Second pass: fetch the full LCT into sc->ha_LCT. */
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 *	Classify each LCT entry (le_type) and resolve its
	 * bus/target/lun address via parameter-group queries.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		/* Port entries: get the initiator ID, then skip (the
		 * `continue' in the default case below) the device query. */
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		/* Only BSA/SCSI/FCA peripherals (the `break' cases) reach
		 * here: look up their bus/target/lun in the DPT group. */
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1706 
1707 /*
1708  * Initialize a message frame.
1709  * We assume that the CDB has already been set up, so all we do here is
1710  * generate the Scatter Gather list.
1711  */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/*
	 * Resolve the I2O TID for (bus,target,lun): consult the cache
	 * first; on a miss, search the LCT and cache the result.
	 */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No such device known to the adapter. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* Transfer direction: 0 = device-to-host, DIR flag = host-to-device. */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 * One simple element per physically contiguous run, up to SG_SIZE
	 * elements.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	/* NOTE(review): dxfer_len is presumably an unsigned type, which
	 * would make this assertion vacuous — confirm against the csio
	 * definition. */
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	/* Final SG element points at the sense buffer. */
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1861 
1862 /*
1863  *	Reset the adapter.
1864  */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;	/* reply status word */
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;	/* saved interrupt state */

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status. Reply_Ptr addresses the R member that
	 *  trails the message; the IOP writes its status there.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 *	NOTE(review): unbounded busy-wait — spins forever if
		 * the IOP never posts a status.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	/* ASR_initiateCp failed to post the message. */
	return (0);
} /* ASR_initOutBound */
1941 
1942 /*
1943  *	Set the system table
1944  */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	/*
	 * Build and send an EXEC_SYS_TAB_SET message describing every
	 * attached asr controller: one SG entry for the SET_SYSTAB header,
	 * one per softc on Asr_softc_list, plus two empty SG elements
	 * terminating the (unused) memory and private descriptor lists.
	 * Returns ENOMEM on allocation failure, otherwise the
	 * ASR_queue_c() completion status (CAM_REQ_CMP on success).
	 */
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	/* NOTE(review): kmalloc with M_WAITOK does not return NULL, so the
	 * NULL checks here and below are defensive only. */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	/* Count the controllers; one system table entry is sent per softc. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* Frame size: base message minus placeholder SG element, plus the
	 * header SG, the per-HA SGs, and the two trailing empty SGs. */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	/* Locate the SG list via the offset nibble encoded in the
	 * VersionOffset field above (offset is in units of U32 << 4). */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One SG entry per controller's system table; the last entry gets
	 * the END_OF_BUFFER flag. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Empty memory and private descriptor lists terminate the frame. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	kfree(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2005 
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	/*
	 * Fetch the Hardware Resource Table from the IOP and use the
	 * adapter IDs it reports to assign a bus number to each matching
	 * LCT entry, tracking the highest bus number seen in
	 * sc->ha_MaxBus.  Returns 0 on success, ENODEV if the EXEC_HRT_GET
	 * message does not complete.
	 */
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt;	/* receive buffer for the HRT */
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries our local Hrt buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Find every LCT entry whose TID matches this HRT entry's
		 * adapter ID (low 12 bits). */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number is carried in the AdapterID's
				 * upper 16 bits. */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2063 
2064 /*
2065  *	Enable the adapter.
2066  */
static int
ASR_enableSys(Asr_softc_t *sc)
{
	/*
	 * Send EXEC_SYS_ENABLE to move the IOP into its operational state.
	 *
	 * NOTE(review): the return value is the boolean result of
	 * (ASR_queue_c() != 0), yet the only caller in view, ASR_init(),
	 * compares the result against CAM_REQ_CMP.  Confirm the intended
	 * convention before changing either side.
	 */
	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;

	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_ENABLE);
	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
} /* ASR_enableSys */
2079 
2080 /*
2081  *	Perform the stages necessary to initialize the adapter
2082  */
2083 static int
2084 ASR_init(Asr_softc_t *sc)
2085 {
2086 	return ((ASR_initOutBound(sc) == 0)
2087 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2088 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2089 } /* ASR_init */
2090 
2091 /*
2092  *	Send a Synchronize Cache command to the target device.
2093  */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	/*
	 * Send a SCSI SYNCHRONIZE CACHE command to the device at
	 * bus/target/lun, if it has a valid TID.  Used before shutdown or
	 * adapter reset to flush the device's write cache.
	 */
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		/* Build the private SCB_EXEC frame on the stack. */
		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		/* NOTE(review): SCBFlags is set again below with a superset
		 * of these bits; this first call looks redundant — confirm
		 * the setter macro overwrites rather than ORs. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* SYNCHRONIZE CACHE is a 6-byte CDB; byte 1 carries the
		 * LUN in its top three bits (SCSI-2 addressing). */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Fire and forget; the completion status is not examined. */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2158 
2159 static void
2160 ASR_synchronize(Asr_softc_t *sc)
2161 {
2162 	int bus, target, lun;
2163 
2164 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2165 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2166 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2167 				ASR_sync(sc,bus,target,lun);
2168 			}
2169 		}
2170 	}
2171 }
2172 
2173 /*
2174  *	Reset the HBA, targets and BUS.
2175  *		Currently this resets *all* the SCSI busses.
2176  */
static __inline void
asr_hbareset(Asr_softc_t *sc)
{
	/* Flush target write caches first, then hard-reset the adapter. */
	ASR_synchronize(sc);
	(void)ASR_reset(sc);
} /* asr_hbareset */
2183 
2184 /*
2185  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2186  * limit and a reduction in error checking (in the pre 4.0 case).
2187  */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	/*
	 * Locate and map the controller's memory BAR(s).  BAR sizing uses
	 * the standard PCI probe: save the BAR, write all-ones, read back
	 * the size mask, restore the BAR.  Returns 1 on success, 0 on
	 * failure.
	 */
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {	/* bit 0 clear => memory space */
			break;
		}
	}
	/*
	 *	Give up?
	 */
	if (rid >= 4) {
		rid = 0;	/* fall back to BAR0 */
	}
	rid = PCIR_BAR(rid);
	p = pci_read_config(dev, rid, sizeof(p));
	pci_write_config(dev, rid, -1, sizeof(p));
	/* Size = two's complement of the read-back mask (flag bits masked). */
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip BAR flag bits to get the base address */
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* Size and map the next BAR for the messaging registers. */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Single-BAR parts: message frames share the I2O window. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2269 
2270 /*
2271  *	A simplified copy of the real pci_map_int with additional
2272  * registration requirements.
2273  */
2274 static int
2275 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2276 {
2277 	int rid = 0;
2278 
2279 	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2280 	  RF_ACTIVE | RF_SHAREABLE);
2281 	if (sc->ha_irq_res == NULL) {
2282 		return (0);
2283 	}
2284 	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2285 	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2286 		return (0);
2287 	}
2288 	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2289 	return (1);
2290 } /* asr_pci_map_int */
2291 
2292 static void
2293 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2294 {
2295 	Asr_softc_t *sc;
2296 
2297 	if (error)
2298 		return;
2299 
2300 	sc = (Asr_softc_t *)arg;
2301 
2302 	/* XXX
2303 	 * The status word can be at a 64-bit address, but the existing
2304 	 * accessor macros simply cannot manipulate 64-bit addresses.
2305 	 */
2306 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2307 	    offsetof(struct Asr_status_mem, status);
2308 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2309 	    offsetof(struct Asr_status_mem, rstatus);
2310 }
2311 
static int
asr_alloc_dma(Asr_softc_t *sc)
{
	/*
	 * Create the parent and status-block DMA tags, allocate the shared
	 * status memory, and load it so asr_status_cb() records the
	 * physical addresses.  Returns 0 on success or ENOMEM, destroying
	 * any tags created before the failing step.
	 */
	device_t dev;

	dev = sc->ha_dev;

	/* Parent tag: 32-bit addressable, otherwise unconstrained. */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/* Status tag: exactly one segment holding ha_statusmem. */
	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sizeof(sc->ha_statusmem),/* maxsize */
			       1,			/* nsegments */
			       sizeof(sc->ha_statusmem),/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_statusmem_dmat)) {
		device_printf(dev, "Cannot allocate status DMA tag\n");
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
		device_printf(dev, "Cannot allocate status memory\n");
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}
	/* The load callback stashes the physical addresses in the softc. */
	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);

	return (0);
}
2360 
2361 static void
2362 asr_release_dma(Asr_softc_t *sc)
2363 {
2364 
2365 	if (sc->ha_rstatus_phys != 0)
2366 		bus_dmamap_unload(sc->ha_statusmem_dmat,
2367 		    sc->ha_statusmem_dmamap);
2368 	if (sc->ha_statusmem != NULL)
2369 		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2370 		    sc->ha_statusmem_dmamap);
2371 	if (sc->ha_statusmem_dmat != NULL)
2372 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2373 	if (sc->ha_parent_dmat != NULL)
2374 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2375 }
2376 
2377 /*
2378  *	Attach the devices, and virtual devices to the driver list.
2379  */
static int
asr_attach(device_t dev)
{
	/*
	 * Device attach: map registers, set up DMA, reset and initialize
	 * the IOP, discover busses/targets from the LCT/HRT, print the
	 * adapter's INQUIRY identification, register one CAM SIM per bus,
	 * and create the control device node.
	 */
	PI2O_EXEC_STATUS_GET_REPLY status;
	PI2O_LCT_ENTRY		 Device;
	Asr_softc_t		 *sc, **ha;
	struct scsi_inquiry_data *iq;
	int			 bus, size, unit;
	int			 error;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->ha_dev = dev;

	if (Asr_softc_list == NULL) {
		/*
		 *	Fixup the OS revision as saved in the dptsig for the
		 *	engine (dptioctl.h) to pick up.
		 */
		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
	}
	/*
	 *	Initialize the software structure
	 */
	LIST_INIT(&(sc->ha_ccb));
	/* Link us into the HA list */
	/* NOTE: the for loop below has an intentionally empty body (the
	 * trailing ';'); it walks to the tail pointer, then the indented
	 * assignment appends sc exactly once. */
	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next));
		*(ha) = sc;

	/*
	 *	This is the real McCoy!
	 */
	if (!asr_pci_map_mem(dev, sc)) {
		device_printf(dev, "could not map memory\n");
		return(ENXIO);
	}
	/* Enable if not formerly enabled */
	pci_write_config(dev, PCIR_COMMAND,
	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));

	sc->ha_pciBusNum = pci_get_bus(dev);
	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);

	if ((error = asr_alloc_dma(sc)) != 0)
		return (error);

	/* Check if the device is there? */
	if (ASR_resetIOP(sc) == 0) {
		device_printf(dev, "Cannot reset adapter\n");
		asr_release_dma(sc);
		return (EIO);
	}
	status = &sc->ha_statusmem->status;
	if (ASR_getStatus(sc) == NULL) {
		device_printf(dev, "could not initialize hardware\n");
		asr_release_dma(sc);
		return(ENODEV);
	}
	/* Copy the IOP's identity into our system table entry. */
	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
	sc->ha_SystemTable.IopState = status->IopState;
	sc->ha_SystemTable.MessengerType = status->MessengerType;
	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */

	if (!asr_pci_map_int(dev, (void *)sc)) {
		device_printf(dev, "could not map interrupt\n");
		asr_release_dma(sc);
		return(ENXIO);
	}

	/* Adjust the maximum inbound count */
	if (((sc->ha_QueueSize =
	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
		sc->ha_QueueSize = MAX_INBOUND;
	}

	/* Adjust the maximum outbound count */
	if (((sc->ha_Msgs_Count =
	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
		sc->ha_Msgs_Count = MAX_OUTBOUND;
	}
	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
		sc->ha_Msgs_Count = sc->ha_QueueSize;
	}

	/* Adjust the maximum SG size to adapter */
	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
	    2)) > MAX_INBOUND_SIZE) {
		size = MAX_INBOUND_SIZE;
	}
	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);

	/*
	 *	Only do a bus/HBA reset on the first time through. On this
	 * first time through, we do not send a flush to the devices.
	 */
	if (ASR_init(sc) == 0) {
		struct BufferInfo {
			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
			I2O_PARAM_READ_OPERATION_RESULT	    Read;
			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
		} Buffer;
		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
#define FW_DEBUG_BLED_OFFSET 8

		/* Locate the firmware's blink-LED status byte. */
		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
		}
		if (ASR_acquireLct(sc) == 0) {
			(void)ASR_acquireHrt(sc);
		}
	} else {
		device_printf(dev, "failed to initialize\n");
		asr_release_dma(sc);
		return(ENXIO);
	}
	/*
	 *	Add in additional probe responses for more channels.
	 * Done here because we need both the acquireLct and
	 * acquireHrt data.
	 */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
		if (Device->le_type == I2O_UNKNOWN) {
			continue;
		}
		/* UserTID 0xFFF: entry addressable by the host; widen the
		 * target/lun limits to cover it. */
		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
			if (Device->le_target > sc->ha_MaxId) {
				sc->ha_MaxId = Device->le_target;
			}
			if (Device->le_lun > sc->ha_MaxLun) {
				sc->ha_MaxLun = Device->le_lun;
			}
		}
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus <= MAX_CHANNEL)) {
			/* Do not increase MaxId for efficiency */
			sc->ha_adapter_target[Device->le_bus] =
			    Device->le_target;
		}
	}

	/*
	 *	Print the HBA model number as inquired from the card.
	 */

	device_printf(dev, " ");

	/* NOTE(review): M_WAITOK kmalloc does not return NULL; the check
	 * is defensive only. */
	if ((iq = (struct scsi_inquiry_data *)kmalloc(
	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
	    NULL) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
		int					posted = 0;

		/* Build an INQUIRY addressed to the IOP itself (Interpret)
		 * to fetch the adapter's own identification strings. */
		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		    (PI2O_MESSAGE_FRAME)Message_Ptr,
		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
		    sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
		/* NOTE(review): SCBFlags is set again below with a superset
		 * of these bits; this first call appears redundant. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		    DPT_ORGANIZATION_ID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = INQUIRY;
		Message_Ptr->CDB[4] =
		    (unsigned char)sizeof(struct scsi_inquiry_data);
		if (Message_Ptr->CDB[4] == 0) {
			Message_Ptr->CDB[4] = 255;
		}

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
		  sizeof(struct scsi_inquiry_data));
		SG(&(Message_Ptr->SGL), 0,
		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
		  iq, sizeof(struct scsi_inquiry_data));
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

		/* Print vendor/product/revision if non-blank. */
		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
			kprintf (" ");
			ASR_prstring (iq->vendor, 8);
			++posted;
		}
		if (iq->product[0] && (iq->product[0] != ' ')) {
			kprintf (" ");
			ASR_prstring (iq->product, 16);
			++posted;
		}
		if (iq->revision[0] && (iq->revision[0] != ' ')) {
			kprintf (" FW Rev. ");
			ASR_prstring (iq->revision, 4);
			++posted;
		}
		kfree(iq, M_TEMP);
		if (posted) {
			kprintf (",");
		}
	}
	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);

	/* Register one CAM SIM (and wildcard path) per discovered bus;
	 * failures on one bus do not prevent registering the others. */
	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
		struct cam_devq	  * devq;
		int		    QueueSize = sc->ha_QueueSize;

		if (QueueSize > MAX_INBOUND) {
			QueueSize = MAX_INBOUND;
		}

		/*
		 *	Create the device queue for our SIM(s).
		 */
		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
			continue;
		}

		/*
		 *	Construct our first channel SIM entry
		 */
		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
						unit, &sim_mplock,
						1, QueueSize, devq);
		if (sc->ha_sim[bus] == NULL) {
			continue;
		}

		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
			cam_sim_free(sc->ha_sim[bus]);
			sc->ha_sim[bus] = NULL;
			continue;
		}

		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
			cam_sim_free(sc->ha_sim[bus]);
			sc->ha_sim[bus] = NULL;
			continue;
		}
	}

	/*
	 *	Generate the device node information
	 */
	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
			       "asr%d", unit);
	if (sc->ha_devt != NULL)
		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
	/* NOTE(review): dereferences ha_devt unconditionally even though
	 * the check above implies make_dev() can fail — confirm. */
	sc->ha_devt->si_drv1 = sc;
	return(0);
} /* asr_attach */
2669 
2670 static void
2671 asr_poll(struct cam_sim *sim)
2672 {
2673 	asr_intr(cam_sim_softc(sim));
2674 } /* asr_poll */
2675 
2676 static void
2677 asr_action(struct cam_sim *sim, union ccb  *ccb)
2678 {
2679 	struct Asr_softc *sc;
2680 
2681 	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2682 			 ccb->ccb_h.func_code);
2683 
2684 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2685 
2686 	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2687 
2688 	switch (ccb->ccb_h.func_code) {
2689 
2690 	/* Common cases first */
2691 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2692 	{
2693 		struct Message {
2694 			char M[MAX_INBOUND_SIZE];
2695 		} Message;
2696 		PI2O_MESSAGE_FRAME   Message_Ptr;
2697 
2698 		/* Reject incoming commands while we are resetting the card */
2699 		if (sc->ha_in_reset != HA_OPERATIONAL) {
2700 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2701 			if (sc->ha_in_reset >= HA_OFF_LINE) {
2702 				/* HBA is now off-line */
2703 				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2704 			} else {
2705 				/* HBA currently resetting, try again later. */
2706 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2707 			}
2708 			debug_asr_cmd_printf (" e\n");
2709 			xpt_done(ccb);
2710 			debug_asr_cmd_printf (" q\n");
2711 			break;
2712 		}
2713 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2714 			kprintf(
2715 			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2716 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2717 			  ccb->csio.cdb_io.cdb_bytes[0],
2718 			  cam_sim_bus(sim),
2719 			  ccb->ccb_h.target_id,
2720 			  ccb->ccb_h.target_lun);
2721 		}
2722 		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2723 				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2724 				     ccb->ccb_h.target_lun);
2725 		debug_asr_dump_ccb(ccb);
2726 
2727 		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2728 		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2729 			debug_asr_cmd2_printf ("TID=%x:\n",
2730 			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2731 			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2732 			debug_asr_cmd2_dump_message(Message_Ptr);
2733 			debug_asr_cmd1_printf (" q");
2734 
2735 			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2736 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2737 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2738 				debug_asr_cmd_printf (" E\n");
2739 				xpt_done(ccb);
2740 			}
2741 			debug_asr_cmd_printf(" Q\n");
2742 			break;
2743 		}
2744 		/*
2745 		 *	We will get here if there is no valid TID for the device
2746 		 * referenced in the scsi command packet.
2747 		 */
2748 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2749 		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2750 		debug_asr_cmd_printf (" B\n");
2751 		xpt_done(ccb);
2752 		break;
2753 	}
2754 
2755 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
2757 		asr_hbareset (sc);
2758 		ccb->ccb_h.status = CAM_REQ_CMP;
2759 		xpt_done(ccb);
2760 		break;
2761 
2762 #if (defined(REPORT_LUNS))
2763 	case REPORT_LUNS:
2764 #endif
2765 	case XPT_ABORT:			/* Abort the specified CCB */
2766 		/* XXX Implement */
2767 		ccb->ccb_h.status = CAM_REQ_INVALID;
2768 		xpt_done(ccb);
2769 		break;
2770 
2771 	case XPT_SET_TRAN_SETTINGS:
2772 		/* XXX Implement */
2773 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2774 		xpt_done(ccb);
2775 		break;
2776 
2777 	case XPT_GET_TRAN_SETTINGS:
2778 	/* Get default/user set transfer settings for the target */
2779 	{
2780 		struct	ccb_trans_settings *cts = &(ccb->cts);
2781 		struct ccb_trans_settings_scsi *scsi =
2782 		    &cts->proto_specific.scsi;
2783 		struct ccb_trans_settings_spi *spi =
2784 		    &cts->xport_specific.spi;
2785 
2786 		if (cts->type == CTS_TYPE_USER_SETTINGS) {
2787 			cts->protocol = PROTO_SCSI;
2788 			cts->protocol_version = SCSI_REV_2;
2789 			cts->transport = XPORT_SPI;
2790 			cts->transport_version = 2;
2791 
2792 			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2793 			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2794 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2795 			spi->sync_period = 6; /* 40MHz */
2796 			spi->sync_offset = 15;
2797 			spi->valid = CTS_SPI_VALID_SYNC_RATE
2798 				   | CTS_SPI_VALID_SYNC_OFFSET
2799 				   | CTS_SPI_VALID_BUS_WIDTH
2800 				   | CTS_SPI_VALID_DISC;
2801 			scsi->valid = CTS_SCSI_VALID_TQ;
2802 
2803 			ccb->ccb_h.status = CAM_REQ_CMP;
2804 		} else {
2805 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2806 		}
2807 		xpt_done(ccb);
2808 		break;
2809 	}
2810 
2811 	case XPT_CALC_GEOMETRY:
2812 	{
2813 		struct	  ccb_calc_geometry *ccg;
2814 		u_int32_t size_mb;
2815 		u_int32_t secs_per_cylinder;
2816 
2817 		ccg = &(ccb->ccg);
2818 		size_mb = ccg->volume_size
2819 			/ ((1024L * 1024L) / ccg->block_size);
2820 
2821 		if (size_mb > 4096) {
2822 			ccg->heads = 255;
2823 			ccg->secs_per_track = 63;
2824 		} else if (size_mb > 2048) {
2825 			ccg->heads = 128;
2826 			ccg->secs_per_track = 63;
2827 		} else if (size_mb > 1024) {
2828 			ccg->heads = 65;
2829 			ccg->secs_per_track = 63;
2830 		} else {
2831 			ccg->heads = 64;
2832 			ccg->secs_per_track = 32;
2833 		}
2834 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2835 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2836 		ccb->ccb_h.status = CAM_REQ_CMP;
2837 		xpt_done(ccb);
2838 		break;
2839 	}
2840 
2841 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2842 		ASR_resetBus (sc, cam_sim_bus(sim));
2843 		ccb->ccb_h.status = CAM_REQ_CMP;
2844 		xpt_done(ccb);
2845 		break;
2846 
2847 	case XPT_TERM_IO:		/* Terminate the I/O process */
2848 		/* XXX Implement */
2849 		ccb->ccb_h.status = CAM_REQ_INVALID;
2850 		xpt_done(ccb);
2851 		break;
2852 
2853 	case XPT_PATH_INQ:		/* Path routing inquiry */
2854 	{
2855 		struct ccb_pathinq *cpi = &(ccb->cpi);
2856 
2857 		cpi->version_num = 1; /* XXX??? */
2858 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2859 		cpi->target_sprt = 0;
2860 		/* Not necessary to reset bus, done by HDM initialization */
2861 		cpi->hba_misc = PIM_NOBUSRESET;
2862 		cpi->hba_eng_cnt = 0;
2863 		cpi->max_target = sc->ha_MaxId;
2864 		cpi->max_lun = sc->ha_MaxLun;
2865 		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2866 		cpi->bus_id = cam_sim_bus(sim);
2867 		cpi->base_transfer_speed = 3300;
2868 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2869 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2870 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2871 		cpi->unit_number = cam_sim_unit(sim);
2872 		cpi->ccb_h.status = CAM_REQ_CMP;
2873                 cpi->transport = XPORT_SPI;
2874                 cpi->transport_version = 2;
2875                 cpi->protocol = PROTO_SCSI;
2876                 cpi->protocol_version = SCSI_REV_2;
2877 		xpt_done(ccb);
2878 		break;
2879 	}
2880 	default:
2881 		ccb->ccb_h.status = CAM_REQ_INVALID;
2882 		xpt_done(ccb);
2883 		break;
2884 	}
2885 } /* asr_action */
2886 
2887 /*
2888  * Handle processing of current CCB as pointed to by the Status.
2889  */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	/*
	 * Drain every pending reply from the adapter's outbound FIFO.
	 * `processed' is returned so the caller can distinguish a real
	 * interrupt (at least one reply handled) from a spurious one.
	 * NOTE(review): the loop gate tests the Mask_InterruptsDisabled
	 * bit of the status register; confirm this is the intended
	 * "interrupt pending" indication for this hardware.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb			   *ccb;
		u_int				    dsc;
		U32				    ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/* Poll the FIFO twice before declaring it empty. */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/*
		 * Convert the reply frame's bus address into a kernel
		 * virtual address within the preallocated message area.
		 */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/*
		 * Message-failure reply: the controller preserved the
		 * original message frame.  Reclaim it by rewriting it as a
		 * NOP and re-posting it to the inbound FIFO.
		 */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE	Message;
			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
			U32			MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *  Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *  Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#endif
			/*
			 *  Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
				      sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *  Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 *	Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status code onto a CAM status. */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested transfer length minus bytes moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/*
				 * Clamp the sense length to the smallest of
				 * the CAM buffer, the I2O frame's sense
				 * area, and the caller's requested length.
				 */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				if (size < ccb->csio.sense_len) {
					ccb->csio.sense_resid =
					    ccb->csio.sense_len - size;
				} else {
					ccb->csio.sense_resid = 0;
				}
				bzero(&(ccb->csio.sense_data),
				    sizeof(ccb->csio.sense_data));
				bcopy(Reply->SenseData,
				      &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/*
		 * CCBs with a CAM path complete through CAM; path-less
		 * (internal, synchronous) CCBs have a sleeper to wake.
		 */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */
3086 
#undef QueueSize	/* Grrrr */
#undef SG_Size		/* Grrrr */

/*
 *	Meant to be included at the bottom of asr.c !!!
 */

/*
 *	Included here as hard coded. Done because other necessary include
 *	files utilize C++ comment structures which make them a nuisance to
 *	include here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
#include	"dev/raid/asr/osd_unix.h"

#define	asr_unit(dev)	  minor(dev)

/* Nonzero while the control device is held open (exclusive-open latch). */
static u_int8_t ASR_ctlr_held;
3109 
3110 static int
3111 asr_open(struct dev_open_args *ap)
3112 {
3113 	cdev_t dev = ap->a_head.a_dev;
3114 	int		 error;
3115 
3116 	if (dev->si_drv1 == NULL) {
3117 		return (ENODEV);
3118 	}
3119 	crit_enter();
3120 	if (ASR_ctlr_held) {
3121 		error = EBUSY;
3122 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3123 		++ASR_ctlr_held;
3124 	}
3125 	crit_exit();
3126 	return (error);
3127 } /* asr_open */
3128 
3129 static int
3130 asr_close(struct dev_close_args *ap)
3131 {
3132 
3133 	ASR_ctlr_held = 0;
3134 	return (0);
3135 } /* asr_close */
3136 
3137 
3138 /*-------------------------------------------------------------------------*/
3139 /*		      Function ASR_queue_i				   */
3140 /*-------------------------------------------------------------------------*/
3141 /* The Parameters Passed To This Function Are :				   */
3142 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3143 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3144 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3145 /*									   */
3146 /* This Function Will Take The User Request Packet And Convert It To An	   */
3147 /* I2O MSG And Send It Off To The Adapter.				   */
3148 /*									   */
3149 /* Return : 0 For OK, Error Code Otherwise				   */
3150 /*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		char			   KernelSpace[sizeof(long)];
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is faulted (BlinkLED code set). */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	/*
	 * NOTE(review): kmalloc() with M_WAITOK should not return NULL, so
	 * these NULL checks appear to be defensive only — confirm against
	 * the platform's kmalloc(9) contract.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-copy the full message now that its real size is known. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree(Message_Ptr, M_TEMP);
		/*
		 * NOTE(review): `error' is 0 on this path, so the errno
		 * printed here is stale/meaningless.
		 */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate a kernel reply frame at least as large as an error reply. */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	/* Pre-build the reply header: contexts copied from the request. */
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		/*
		 * Walk every inbound SG element, allocating a kernel-side
		 * bounce buffer (elm) for each user-space segment.
		 */
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			if ((elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: free every bounce buffer and both frames. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			kfree(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree(elm, M_TEMP);
		}
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Record overall success/failure in the reply's status code. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	/* Append autosense data if present and the user frame has room. */
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	kfree(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3597 
3598 /*----------------------------------------------------------------------*/
3599 /*			    Function asr_ioctl			       */
3600 /*----------------------------------------------------------------------*/
3601 /* The parameters passed to this function are :				*/
3602 /*     dev  : Device number.						*/
3603 /*     cmd  : Ioctl Command						*/
3604 /*     data : User Argument Passed In.					*/
3605 /*     flag : Mode Parameter						*/
3606 /*     proc : Process Parameter						*/
3607 /*									*/
3608 /* This function is the user interface into this adapter driver		*/
3609 /*									*/
3610 /* Return : zero if OK, error code if not				*/
3611 /*----------------------------------------------------------------------*/
3612 
3613 static int
3614 asr_ioctl(struct dev_ioctl_args *ap)
3615 {
3616 	cdev_t dev = ap->a_head.a_dev;
3617 	u_long cmd = ap->a_cmd;
3618 	caddr_t data = ap->a_data;
3619 	Asr_softc_t	*sc = dev->si_drv1;
3620 	int		i, error = 0;
3621 #ifdef ASR_IOCTL_COMPAT
3622 	int		j;
3623 #endif /* ASR_IOCTL_COMPAT */
3624 
3625 	if (sc != NULL)
3626 	switch(cmd) {
3627 
3628 	case DPT_SIGNATURE:
3629 #ifdef ASR_IOCTL_COMPAT
3630 #if (dsDescription_size != 50)
3631 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3632 #endif
3633 		if (cmd & 0xFFFF0000) {
3634 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3635 			return (0);
3636 		}
3637 	/* Traditional version of the ioctl interface */
3638 	case DPT_SIGNATURE & 0x0000FFFF:
3639 #endif
3640 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3641 				sizeof(dpt_sig_S)));
3642 
3643 	/* Traditional version of the ioctl interface */
3644 	case DPT_CTRLINFO & 0x0000FFFF:
3645 	case DPT_CTRLINFO: {
3646 		struct {
3647 			u_int16_t length;
3648 			u_int16_t drvrHBAnum;
3649 			u_int32_t baseAddr;
3650 			u_int16_t blinkState;
3651 			u_int8_t  pciBusNum;
3652 			u_int8_t  pciDeviceNum;
3653 			u_int16_t hbaFlags;
3654 			u_int16_t Interrupt;
3655 			u_int32_t reserved1;
3656 			u_int32_t reserved2;
3657 			u_int32_t reserved3;
3658 		} CtlrInfo;
3659 
3660 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3661 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3662 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3663 		CtlrInfo.baseAddr = sc->ha_Base;
3664 		i = ASR_getBlinkLedCode (sc);
3665 		if (i == -1)
3666 			i = 0;
3667 
3668 		CtlrInfo.blinkState = i;
3669 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3670 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3671 #define	FLG_OSD_PCI_VALID 0x0001
3672 #define	FLG_OSD_DMA	  0x0002
3673 #define	FLG_OSD_I2O	  0x0004
3674 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3675 		CtlrInfo.Interrupt = sc->ha_irq;
3676 #ifdef ASR_IOCTL_COMPAT
3677 		if (cmd & 0xffff0000)
3678 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3679 		else
3680 #endif /* ASR_IOCTL_COMPAT */
3681 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3682 	}	return (error);
3683 
3684 	/* Traditional version of the ioctl interface */
3685 	case DPT_SYSINFO & 0x0000FFFF:
3686 	case DPT_SYSINFO: {
3687 		sysInfo_S	Info;
3688 #ifdef ASR_IOCTL_COMPAT
3689 		char	      * cp;
3690 		/* Kernel Specific ptok `hack' */
3691 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3692 
3693 		bzero(&Info, sizeof(Info));
3694 
3695 		/* Appears I am the only person in the Kernel doing this */
3696 		outb (0x70, 0x12);
3697 		i = inb(0x71);
3698 		j = i >> 4;
3699 		if (i == 0x0f) {
3700 			outb (0x70, 0x19);
3701 			j = inb (0x71);
3702 		}
3703 		Info.drive0CMOS = j;
3704 
3705 		j = i & 0x0f;
3706 		if (i == 0x0f) {
3707 			outb (0x70, 0x1a);
3708 			j = inb (0x71);
3709 		}
3710 		Info.drive1CMOS = j;
3711 
3712 		Info.numDrives = *((char *)ptok(0x475));
3713 #else /* ASR_IOCTL_COMPAT */
3714 		bzero(&Info, sizeof(Info));
3715 #endif /* ASR_IOCTL_COMPAT */
3716 
3717 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3718 #if defined(__i386__)
3719 		switch (cpu) {
3720 		case CPU_386SX: case CPU_386:
3721 			Info.processorType = PROC_386; break;
3722 		case CPU_486SX: case CPU_486:
3723 			Info.processorType = PROC_486; break;
3724 		case CPU_586:
3725 			Info.processorType = PROC_PENTIUM; break;
3726 		case CPU_686:
3727 			Info.processorType = PROC_SEXIUM; break;
3728 		}
3729 #endif
3730 
3731 		Info.osType = OS_BSDI_UNIX;
3732 		Info.osMajorVersion = osrelease[0] - '0';
3733 		Info.osMinorVersion = osrelease[2] - '0';
3734 		/* Info.osRevision = 0; */
3735 		/* Info.osSubRevision = 0; */
3736 		Info.busType = SI_PCI_BUS;
3737 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3738 
3739 #ifdef ASR_IOCTL_COMPAT
3740 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3741 		/* Go Out And Look For I2O SmartROM */
3742 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3743 			int k;
3744 
3745 			cp = ptok(j);
3746 			if (*((unsigned short *)cp) != 0xAA55) {
3747 				continue;
3748 			}
3749 			j += (cp[2] * 512) - 2048;
3750 			if ((*((u_long *)(cp + 6))
3751 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3752 			 || (*((u_long *)(cp + 10))
3753 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3754 				continue;
3755 			}
3756 			cp += 0x24;
3757 			for (k = 0; k < 64; ++k) {
3758 				if (*((unsigned short *)cp)
3759 				 == (' ' + ('v' * 256))) {
3760 					break;
3761 				}
3762 			}
3763 			if (k < 64) {
3764 				Info.smartROMMajorVersion
3765 				    = *((unsigned char *)(cp += 4)) - '0';
3766 				Info.smartROMMinorVersion
3767 				    = *((unsigned char *)(cp += 2));
3768 				Info.smartROMRevision
3769 				    = *((unsigned char *)(++cp));
3770 				Info.flags |= SI_SmartROMverValid;
3771 				Info.flags &= ~SI_NO_SmartROM;
3772 				break;
3773 			}
3774 		}
3775 		/* Get The Conventional Memory Size From CMOS */
3776 		outb (0x70, 0x16);
3777 		j = inb (0x71);
3778 		j <<= 8;
3779 		outb (0x70, 0x15);
3780 		j |= inb(0x71);
3781 		Info.conventionalMemSize = j;
3782 
3783 		/* Get The Extended Memory Found At Power On From CMOS */
3784 		outb (0x70, 0x31);
3785 		j = inb (0x71);
3786 		j <<= 8;
3787 		outb (0x70, 0x30);
3788 		j |= inb(0x71);
3789 		Info.extendedMemSize = j;
3790 		Info.flags |= SI_MemorySizeValid;
3791 
3792 		/* Copy Out The Info Structure To The User */
3793 		if (cmd & 0xFFFF0000)
3794 			bcopy(&Info, data, sizeof(Info));
3795 		else
3796 #endif /* ASR_IOCTL_COMPAT */
3797 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3798 		return (error); }
3799 
3800 		/* Get The BlinkLED State */
3801 	case DPT_BLINKLED:
3802 		i = ASR_getBlinkLedCode (sc);
3803 		if (i == -1)
3804 			i = 0;
3805 #ifdef ASR_IOCTL_COMPAT
3806 		if (cmd & 0xffff0000)
3807 			bcopy(&i, data, sizeof(i));
3808 		else
3809 #endif /* ASR_IOCTL_COMPAT */
3810 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3811 		break;
3812 
3813 		/* Send an I2O command */
3814 	case I2OUSRCMD:
3815 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3816 
3817 		/* Reset and re-initialize the adapter */
3818 	case I2ORESETCMD:
3819 		return (ASR_reset(sc));
3820 
3821 		/* Rescan the LCT table and resynchronize the information */
3822 	case I2ORESCANCMD:
3823 		return (ASR_rescan(sc));
3824 	}
3825 	return (EINVAL);
3826 } /* asr_ioctl */
3827