xref: /dflybsd-src/sys/dev/raid/arcmsr/arcmsr.c (revision 330d3c4b487f3fc5d0eb023645b0b2a569f7048e)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE	            NAME	         DESCRIPTION
41 **     1.00.00.00    3/31/2004	       Erich Chen	     First release
42 **     1.20.00.02   11/29/2004         Erich Chen        bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03    4/19/2005         Erich Chen        add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12    9/12/2005         Erich Chen        bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling of non-zero high part physical address
49 **                                                       of srb resource
50 **     1.20.00.13    8/18/2006         Erich Chen        remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007         Erich Chen        bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15   10/10/2007         Erich Chen        support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009         Erich Chen        Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010         Ching Huang       Added support for ARC1880
61 **							 report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when a device fails,
62 **							 preventing cam_periph_error from removing all LUN devices of one Target id
63 **							 when any one LUN device fails
64 ******************************************************************************************
65 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.34 2010/07/21 18:50:24 delphij Exp $
66 */
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/kernel.h>
71 #include <sys/bus.h>
72 #include <sys/queue.h>
73 #include <sys/stat.h>
74 #include <sys/devicestat.h>
75 #include <sys/kthread.h>
76 #include <sys/module.h>
77 #include <sys/proc.h>
78 #include <sys/lock.h>
79 #include <sys/sysctl.h>
80 #include <sys/thread2.h>
81 #include <sys/poll.h>
82 #include <sys/ioccom.h>
83 #include <sys/device.h>
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 
88 #include <machine/atomic.h>
89 #include <sys/conf.h>
90 #include <sys/rman.h>
91 
92 #include <bus/cam/cam.h>
93 #include <bus/cam/cam_ccb.h>
94 #include <bus/cam/cam_sim.h>
95 #include <bus/cam/cam_periph.h>
96 #include <bus/cam/cam_xpt_periph.h>
97 #include <bus/cam/cam_xpt_sim.h>
98 #include <bus/cam/cam_debug.h>
99 #include <bus/cam/scsi/scsi_all.h>
100 #include <bus/cam/scsi/scsi_message.h>
101 /*
102 **************************************************************************
103 **************************************************************************
104 */
105 #include <sys/endian.h>
106 #include <bus/pci/pcivar.h>
107 #include <bus/pci/pcireg.h>
108 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
109 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
110 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
111 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
112 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
113 #define arcmsr_htole32(x)	htole32(x)
114 typedef struct lock		arcmsr_lock_t;
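/*
** Thin wrappers around the DragonFly lockmgr(9) API; the driver takes an
** exclusive per-adapter lock (e.g. acb->qbuffer_lock) around register access
** and queue manipulation.
*/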
115 
116 #if !defined(CAM_NEW_TRAN_CODE)
117 #define	CAM_NEW_TRAN_CODE	1
118 #endif
119 
120 #include <dev/raid/arcmsr/arcmsr.h>
121 #define ARCMSR_SRBS_POOL_SIZE           ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM))
122 /*
123 **************************************************************************
124 **************************************************************************
125 */
126 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
127 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
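/*
** Register accessors: 's' names a message-unit structure, 'b' selects the
** bus_space tag/handle pair in the softc, and offsetof(struct s, r) supplies
** the register offset within that window.
*/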
128 /*
129 **************************************************************************
130 **************************************************************************
131 */
132 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
133 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
134 static int arcmsr_probe(device_t dev);
135 static int arcmsr_attach(device_t dev);
136 static int arcmsr_detach(device_t dev);
137 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
138 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
139 static int arcmsr_shutdown(device_t dev);
140 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
141 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
142 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
143 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
144 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
145 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
146 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
147 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
148 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
149 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
150 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
151 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
152 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
153 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
154 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
155 static int arcmsr_resume(device_t dev);
156 static int arcmsr_suspend(device_t dev);
157 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
158 static void	arcmsr_polling_devmap(void* arg);
159 /*
160 **************************************************************************
161 **************************************************************************
162 */
163 static void UDELAY(u_int32_t us) { DELAY(us); }
164 /*
165 **************************************************************************
166 **************************************************************************
167 */
168 static bus_dmamap_callback_t arcmsr_map_freesrb;
169 static bus_dmamap_callback_t arcmsr_executesrb;
170 /*
171 **************************************************************************
172 **************************************************************************
173 */
174 static d_open_t	arcmsr_open;
175 static d_close_t arcmsr_close;
176 static d_ioctl_t arcmsr_ioctl;
177 
178 static device_method_t arcmsr_methods[]={
179 	DEVMETHOD(device_probe,		arcmsr_probe),
180 	DEVMETHOD(device_attach,	arcmsr_attach),
181 	DEVMETHOD(device_detach,	arcmsr_detach),
182 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
183 	DEVMETHOD(device_suspend,	arcmsr_suspend),
184 	DEVMETHOD(device_resume,	arcmsr_resume),
185 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
186 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
187 	{ 0, 0 }
188 };
189 
190 static driver_t arcmsr_driver={
191 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
192 };
193 
194 static devclass_t arcmsr_devclass;
195 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
196 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
197 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
198 #ifndef BUS_DMA_COHERENT
199 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
200 #endif
201 
202 static struct dev_ops arcmsr_ops = {
203 	{ "arcmsr", 0, 0 },
204 	.d_open =	arcmsr_open,		        /* open     */
205 	.d_close =	arcmsr_close,		        /* close    */
206 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
207 };
208 
209 /*
210 **************************************************************************
211 **************************************************************************
212 */
213 
214 static int
215 arcmsr_open(struct dev_open_args *ap)
216 {
217 	cdev_t dev = ap->a_head.a_dev;
218 	struct AdapterControlBlock *acb=dev->si_drv1;
219 
220 	if(acb==NULL) {
221 		return ENXIO;
222 	}
223 	return 0;
224 }
225 
226 /*
227 **************************************************************************
228 **************************************************************************
229 */
230 
231 static int
232 arcmsr_close(struct dev_close_args *ap)
233 {
234 	cdev_t dev = ap->a_head.a_dev;
235 	struct AdapterControlBlock *acb=dev->si_drv1;
236 
237 	if(acb==NULL) {
238 		return ENXIO;
239 	}
240 	return 0;
241 }
242 
243 /*
244 **************************************************************************
245 **************************************************************************
246 */
247 
248 static int
249 arcmsr_ioctl(struct dev_ioctl_args *ap)
250 {
251 	cdev_t dev = ap->a_head.a_dev;
252 	u_long ioctl_cmd = ap->a_cmd;
253 	caddr_t arg = ap->a_data;
254 	struct AdapterControlBlock *acb=dev->si_drv1;
255 
256 	if(acb==NULL) {
257 		return ENXIO;
258 	}
259 	return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
260 }
261 
262 /*
263 **********************************************************************
264 **********************************************************************
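** arcmsr_disable_allintr: mask every outbound interrupt source and return the
** previous mask so the caller can restore it later with arcmsr_enable_allintr().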
265 */
266 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
267 {
268 	u_int32_t intmask_org=0;
269 
270 	switch (acb->adapter_type) {
271 	case ACB_ADAPTER_TYPE_A: {
272 			/* disable all outbound interrupt */
273 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
274 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
275 		}
276 		break;
277 	case ACB_ADAPTER_TYPE_B: {
278 			/* disable all outbound interrupt */
279 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
280 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
281 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
282 		}
283 		break;
284 	case ACB_ADAPTER_TYPE_C: {
285 			/* disable all outbound interrupt */
286 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask); /* disable outbound message0 int */
287 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
288 		}
289 		break;
290 	}
291 	return(intmask_org);
292 }
293 /*
294 **********************************************************************
295 **********************************************************************
296 */
297 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
298 {
299 	u_int32_t mask;
300 
301 	switch (acb->adapter_type) {
302 	case ACB_ADAPTER_TYPE_A: {
303 			/* enable outbound Post Queue, outbound doorbell Interrupt */
304 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
305 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
306 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
307 		}
308 		break;
309 	case ACB_ADAPTER_TYPE_B: {
310 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
311 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
312 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
313 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
314 		}
315 		break;
316 	case ACB_ADAPTER_TYPE_C: {
317 			/* enable outbound Post Queue, outbound doorbell Interrupt */
318 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
319 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
320 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
321 		}
322 		break;
323 	}
324 	return;
325 }
326 /*
327 **********************************************************************
328 **********************************************************************
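** arcmsr_hba_wait_msgint_ready: poll the type A outbound interrupt status for a
** message0 completion and clear it; gives up after roughly 20 seconds
** (20 retries x 100 polls x 10ms).  The hbb/hbc variants below do the same for
** the other chip families.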
329 */
330 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
331 {
332 	u_int32_t Index;
333 	u_int8_t Retries=0x00;
334 
335 	do {
336 		for(Index=0; Index < 100; Index++) {
337 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
338 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
339 				return TRUE;
340 			}
341 			UDELAY(10000);
342 		}/* max 1 second */
343 	}while(Retries++ < 20);/*max 20 sec*/
344 	return FALSE;
345 }
346 /*
347 **********************************************************************
348 **********************************************************************
349 */
350 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
351 {
352 	u_int32_t Index;
353 	u_int8_t Retries=0x00;
354 
355 	do {
356 		for(Index=0; Index < 100; Index++) {
357 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
358 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
359 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
360 				return TRUE;
361 			}
362 			UDELAY(10000);
363 		}/* max 1 second */
364 	}while(Retries++ < 20);/*max 20 sec*/
365 	return FALSE;
366 }
367 /*
368 **********************************************************************
369 **********************************************************************
370 */
371 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
372 {
373 	u_int32_t Index;
374 	u_int8_t Retries=0x00;
375 
376 	do {
377 		for(Index=0; Index < 100; Index++) {
378 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
379 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
380 				return TRUE;
381 			}
382 			UDELAY(10000);
383 		}/* max 1 second */
384 	}while(Retries++ < 20);/*max 20 sec*/
385 	return FALSE;
386 }
387 /*
388 ************************************************************************
389 ************************************************************************
390 */
391 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
392 {
393 	int retry_count=30;/* wait up to 10 minutes for the adapter cache flush */
394 
395 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
396 	do {
397 		if(arcmsr_hba_wait_msgint_ready(acb)) {
398 			break;
399 		} else {
400 			retry_count--;
401 		}
402 	}while(retry_count!=0);
403 	return;
404 }
405 /*
406 ************************************************************************
407 ************************************************************************
408 */
409 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
410 {
411 	int retry_count=30;/* wait up to 10 minutes for the adapter cache flush */
412 
413 	CHIP_REG_WRITE32(HBB_DOORBELL,
414 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
415 	do {
416 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
417 			break;
418 		} else {
419 			retry_count--;
420 		}
421 	}while(retry_count!=0);
422 	return;
423 }
424 /*
425 ************************************************************************
426 ************************************************************************
427 */
428 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
429 {
430 	int retry_count=30;/* wait up to 10 minutes for the adapter cache flush */
431 
432 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
433 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
434 	do {
435 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
436 			break;
437 		} else {
438 			retry_count--;
439 		}
440 	}while(retry_count!=0);
441 	return;
442 }
443 /*
444 ************************************************************************
445 ************************************************************************
446 */
447 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
448 {
449 	switch (acb->adapter_type) {
450 	case ACB_ADAPTER_TYPE_A: {
451 			arcmsr_flush_hba_cache(acb);
452 		}
453 		break;
454 	case ACB_ADAPTER_TYPE_B: {
455 			arcmsr_flush_hbb_cache(acb);
456 		}
457 		break;
458 	case ACB_ADAPTER_TYPE_C: {
459 			arcmsr_flush_hbc_cache(acb);
460 		}
461 		break;
462 	}
463 	return;
464 }
465 /*
466 *******************************************************************************
467 *******************************************************************************
468 */
469 static int arcmsr_suspend(device_t dev)
470 {
471 	struct AdapterControlBlock	*acb = device_get_softc(dev);
472 
473 	/* flush controller */
474 	arcmsr_iop_parking(acb);
475 	/* disable all outbound interrupt */
476 	arcmsr_disable_allintr(acb);
477 	return(0);
478 }
479 /*
480 *******************************************************************************
481 *******************************************************************************
482 */
483 static int arcmsr_resume(device_t dev)
484 {
485 	struct AdapterControlBlock	*acb = device_get_softc(dev);
486 
487 	arcmsr_iop_init(acb);
488 	return(0);
489 }
490 /*
491 *********************************************************************************
492 *********************************************************************************
493 */
494 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
495 {
496 	struct AdapterControlBlock *acb;
497 	u_int8_t target_id, target_lun;
498 	struct cam_sim * sim;
499 
500 	sim=(struct cam_sim *) cb_arg;
501 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
502 	switch (code) {
503 	case AC_LOST_DEVICE:
504 		target_id=xpt_path_target_id(path);
505 		target_lun=xpt_path_lun_id(path);
506 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
507 			break;
508 		}
509 		kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
510 		break;
511 	default:
512 		break;
513 	}
514 }
515 /*
516 **********************************************************************
517 **********************************************************************
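** arcmsr_srb_complete: sync and unload the data DMA map, decrement the
** outstanding count when stand_flag is set, recycle the SRB onto the working
** queue, and hand the CCB back to CAM via xpt_done().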
518 */
519 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
520 {
521 	struct AdapterControlBlock *acb=srb->acb;
522 	union ccb * pccb=srb->pccb;
523 
524 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
525 		bus_dmasync_op_t op;
526 
527 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
528 			op = BUS_DMASYNC_POSTREAD;
529 		} else {
530 			op = BUS_DMASYNC_POSTWRITE;
531 		}
532 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
533 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
534 	}
535 	if(stand_flag==1) {
536 		atomic_subtract_int(&acb->srboutstandingcount, 1);
537 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
538 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
539 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
540 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
541 		}
542 	}
543 	srb->startdone=ARCMSR_SRB_DONE;
544 	srb->srb_flags=0;
545 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
546 	acb->workingsrb_doneindex++;
547 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
548 	xpt_done(pccb);
549 	return;
550 }
551 /*
552 **********************************************************************
553 **********************************************************************
554 */
555 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
556 {
557 	union ccb * pccb=srb->pccb;
558 
559 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
560 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
561 	if(&pccb->csio.sense_data) {
562 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
563 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
564 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
565 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
566 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
567 	}
568 	return;
569 }
570 /*
571 *********************************************************************
572 *********************************************************************
573 */
574 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
575 {
576 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
577 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
578 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
579 	}
580 	return;
581 }
582 /*
583 *********************************************************************
584 *********************************************************************
585 */
586 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
587 {
588 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
589 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
590 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
591 	}
592 	return;
593 }
594 /*
595 *********************************************************************
596 *********************************************************************
597 */
598 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
599 {
600 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
602 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
603 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
604 	}
605 	return;
606 }
607 /*
608 *********************************************************************
609 *********************************************************************
610 */
611 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
612 {
613 	switch (acb->adapter_type) {
614 	case ACB_ADAPTER_TYPE_A: {
615 			arcmsr_abort_hba_allcmd(acb);
616 		}
617 		break;
618 	case ACB_ADAPTER_TYPE_B: {
619 			arcmsr_abort_hbb_allcmd(acb);
620 		}
621 		break;
622 	case ACB_ADAPTER_TYPE_C: {
623 			arcmsr_abort_hbc_allcmd(acb);
624 		}
625 		break;
626 	}
627 	return;
628 }
629 /*
630 **************************************************************************
631 **************************************************************************
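** arcmsr_report_srb_state: translate the firmware DeviceStatus into a CAM
** status (CAM_REQ_CMP, CAM_DEV_NOT_THERE, check condition with sense data, or
** CAM_UNCOR_PARITY for anything unrecognized) and complete the SRB.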
632 */
633 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
634 {
635 	int target, lun;
636 
637 	target=srb->pccb->ccb_h.target_id;
638 	lun=srb->pccb->ccb_h.target_lun;
639 	if(error == FALSE) {
640 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
641 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
642 		}
643 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
644 		arcmsr_srb_complete(srb, 1);
645 	} else {
646 		switch(srb->arcmsr_cdb.DeviceStatus) {
647 		case ARCMSR_DEV_SELECT_TIMEOUT: {
648 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
649 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
650 				}
651 				acb->devstate[target][lun]=ARECA_RAID_GONE;
652 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
653 				arcmsr_srb_complete(srb, 1);
654 			}
655 			break;
656 		case ARCMSR_DEV_ABORTED:
657 		case ARCMSR_DEV_INIT_FAIL: {
658 				acb->devstate[target][lun]=ARECA_RAID_GONE;
659 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
660 				arcmsr_srb_complete(srb, 1);
661 			}
662 			break;
663 		case SCSISTAT_CHECK_CONDITION: {
664 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
665 				arcmsr_report_sense_info(srb);
666 				arcmsr_srb_complete(srb, 1);
667 			}
668 			break;
669 		default:
670 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
671 					, acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
672 			acb->devstate[target][lun]=ARECA_RAID_GONE;
673 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
674 			/* unknown error or CRC error, just retry */
675 			arcmsr_srb_complete(srb, 1);
676 			break;
677 		}
678 	}
679 	return;
680 }
681 /*
682 **************************************************************************
683 **************************************************************************
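** arcmsr_drain_donequeue: convert a reply-queue token back into the SRB
** address (type C replies carry it with the low bits masked, type A/B carry it
** shifted right by 5) and report that command's completion state.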
684 */
685 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
686 {
687 	struct CommandControlBlock *srb;
688 
689 	/* check if command done with no error*/
690 	switch (acb->adapter_type) {
691 	case ACB_ADAPTER_TYPE_C:
692 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/
693 		break;
694 	case ACB_ADAPTER_TYPE_A:
695 	case ACB_ADAPTER_TYPE_B:
696 	default:
697 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
698 		break;
699 	}
700 	if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
701 		if(srb->startdone==ARCMSR_SRB_ABORTED) {
702 			kprintf("arcmsr%d: srb='%p' isr got aborted command \n", acb->pci_unit, srb);
703 			srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
704 			arcmsr_srb_complete(srb, 1);
705 			return;
706 		}
707 		kprintf("arcmsr%d: isr got an illegal srb command done "
708 			"acb='%p' srb='%p' srbacb='%p' startdone=0x%x srboutstandingcount=%d \n",
709 			acb->pci_unit, acb, srb, srb->acb, srb->startdone, acb->srboutstandingcount);
710 		return;
711 	}
712 	arcmsr_report_srb_state(acb, srb, error);
713 	return;
714 }
715 /*
716 **********************************************************************
717 **********************************************************************
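** arcmsr_done4abort_postqueue: drain whatever completions are still sitting in
** the adapter's outbound post queue (used while resetting) so those SRBs are
** finished before the rest are aborted.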
718 */
719 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
720 {
721 	int i=0;
722 	u_int32_t flag_srb;
723 	u_int16_t error;
724 
725 	switch (acb->adapter_type) {
726 	case ACB_ADAPTER_TYPE_A: {
727 			u_int32_t outbound_intstatus;
728 
729 			/*clear and abort all outbound posted Q*/
730 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
731 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
732 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
733 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
734 				arcmsr_drain_donequeue(acb, flag_srb, error);
735 			}
736 		}
737 		break;
738 	case ACB_ADAPTER_TYPE_B: {
739 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
740 
741 			/*clear all outbound posted Q*/
742 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
743 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
744 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
745 					phbbmu->done_qbuffer[i]=0;
746 					error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
747 					arcmsr_drain_donequeue(acb, flag_srb, error);
748 				}
749 				phbbmu->post_qbuffer[i]=0;
750 			}/*drain reply FIFO*/
751 			phbbmu->doneq_index=0;
752 			phbbmu->postq_index=0;
753 		}
754 		break;
755 	case ACB_ADAPTER_TYPE_C: {
756 
757 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
758 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
759 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
760 				arcmsr_drain_donequeue(acb, flag_srb, error);
761 			}
762 		}
763 		break;
764 	}
765 	return;
766 }
767 /*
768 ****************************************************************************
769 ****************************************************************************
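** arcmsr_iop_reset: with interrupts masked, drain the outbound queue, ask the
** firmware to abort everything still outstanding, complete any SRB left in the
** ARCMSR_SRB_START state as CAM_REQ_ABORTED, then reset the queue indexes.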
770 */
771 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
772 {
773 	struct CommandControlBlock *srb;
774 	u_int32_t intmask_org;
775 	u_int32_t i=0;
776 
777 	if(acb->srboutstandingcount>0) {
778 		/* disable all outbound interrupt */
779 		intmask_org=arcmsr_disable_allintr(acb);
780 		/*clear and abort all outbound posted Q*/
781 		arcmsr_done4abort_postqueue(acb);
782 		/* talk to iop 331 outstanding command aborted*/
783 		arcmsr_abort_allcmd(acb);
784 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
785 			srb=acb->psrb_pool[i];
786 			if(srb->startdone==ARCMSR_SRB_START) {
787 				srb->startdone=ARCMSR_SRB_ABORTED;
788 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
789 				arcmsr_srb_complete(srb, 1);
790 			}
791 		}
792 		/* enable all outbound interrupt */
793 		arcmsr_enable_allintr(acb, intmask_org);
794 	}
795 	atomic_set_int(&acb->srboutstandingcount, 0);
796 	acb->workingsrb_doneindex=0;
797 	acb->workingsrb_startindex=0;
798 	return;
799 }
800 /*
801 **********************************************************************
802 **********************************************************************
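** arcmsr_build_srb: fill the ARCMSR_CDB from the CAM ccb and build its
** scatter/gather list; segments with a zero high address use SG32ENTRY, others
** use SG64ENTRY and are split so no entry crosses a 4GB boundary.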
803 */
804 static void arcmsr_build_srb(struct CommandControlBlock *srb,
805 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
806 {
807 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
808 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
809 	u_int32_t address_lo, address_hi;
810 	union ccb * pccb=srb->pccb;
811 	struct ccb_scsiio * pcsio= &pccb->csio;
812 	u_int32_t arccdbsize=0x30;
813 
814 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
815 	arcmsr_cdb->Bus=0;
816 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
817 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
818 	arcmsr_cdb->Function=1;
819 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
820 	arcmsr_cdb->Context=0;
821 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
822 	if(nseg != 0) {
823 		struct AdapterControlBlock *acb=srb->acb;
824 		bus_dmasync_op_t op;
825 		u_int32_t length, i, cdb_sgcount=0;
826 
827 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
828 			op=BUS_DMASYNC_PREREAD;
829 		} else {
830 			op=BUS_DMASYNC_PREWRITE;
831 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
832 			srb->srb_flags|=SRB_FLAG_WRITE;
833 		}
834 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
835 		for(i=0;i<nseg;i++) {
836 			/* Get the physical address of the current data pointer */
837 			length=arcmsr_htole32(dm_segs[i].ds_len);
838 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
839 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
840 			if(address_hi==0) {
841 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
842 				pdma_sg->address=address_lo;
843 				pdma_sg->length=length;
844 				psge += sizeof(struct SG32ENTRY);
845 				arccdbsize += sizeof(struct SG32ENTRY);
846 			} else {
847 				u_int32_t sg64s_size=0, tmplength=length;
848 
849 				while(1) {
850 					u_int64_t span4G, length0;
851 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
852 
853 					span4G=(u_int64_t)address_lo + tmplength;
854 					pdma_sg->addresshigh=address_hi;
855 					pdma_sg->address=address_lo;
856 					if(span4G > 0x100000000) {
857 						/*see if cross 4G boundary*/
858 						length0=0x100000000-address_lo;
859 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
860 						address_hi=address_hi+1;
861 						address_lo=0;
862 						tmplength=tmplength-(u_int32_t)length0;
863 						sg64s_size += sizeof(struct SG64ENTRY);
864 						psge += sizeof(struct SG64ENTRY);
865 						cdb_sgcount++;
866 					} else {
867 						pdma_sg->length=tmplength|IS_SG64_ADDR;
868 						sg64s_size += sizeof(struct SG64ENTRY);
869 						psge += sizeof(struct SG64ENTRY);
870 						break;
871 					}
872 				}
873 				arccdbsize += sg64s_size;
874 			}
875 			cdb_sgcount++;
876 		}
877 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
878 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
879 		if( arccdbsize > 256) {
880 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
881 		}
882 	} else {
883 		arcmsr_cdb->DataLength = 0;
884 	}
885 	srb->arc_cdb_size=arccdbsize;
886 	return;
887 }
888 /*
889 **************************************************************************
890 **************************************************************************
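** arcmsr_post_srb: hand one built SRB to the firmware.  Type A writes the
** shifted CDB address to the inbound queue port, type B stores it in the
** host-resident post queue and rings the doorbell, and type C posts it (with
** the encoded CDB size) through inbound_queueport_low/high.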
891 */
892 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
893 {
894 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
895 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
896 
897 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
898 	atomic_add_int(&acb->srboutstandingcount, 1);
899 	srb->startdone=ARCMSR_SRB_START;
900 
901 	switch (acb->adapter_type) {
902 	case ACB_ADAPTER_TYPE_A: {
903 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
904 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
905 			} else {
906 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
907 			}
908 		}
909 		break;
910 	case ACB_ADAPTER_TYPE_B: {
911 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
912 			int ending_index, index;
913 
914 			index=phbbmu->postq_index;
915 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
916 			phbbmu->post_qbuffer[ending_index]=0;
917 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
918 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
919 			} else {
920 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
921 			}
922 			index++;
923 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
924 			phbbmu->postq_index=index;
925 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
926 		}
927 		break;
928 	case ACB_ADAPTER_TYPE_C:
929 		{
930 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
931 
932 			arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
933 			ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
934 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
935 			if(cdb_phyaddr_hi32)
936 			{
937 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
938 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
939 			}
940 			else
941 			{
942 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
943 			}
944 		}
945 		break;
946 	}
947 	return;
948 }
949 /*
950 ************************************************************************
951 ************************************************************************
952 */
953 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
954 {
955 	struct QBUFFER *qbuffer=NULL;
956 
957 	switch (acb->adapter_type) {
958 	case ACB_ADAPTER_TYPE_A: {
959 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
960 
961 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
962 		}
963 		break;
964 	case ACB_ADAPTER_TYPE_B: {
965 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
966 
967 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
968 		}
969 		break;
970 	case ACB_ADAPTER_TYPE_C: {
971 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
972 
973 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
974 		}
975 		break;
976 	}
977 	return(qbuffer);
978 }
979 /*
980 ************************************************************************
981 ************************************************************************
982 */
983 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
984 {
985 	struct QBUFFER *qbuffer=NULL;
986 
987 	switch (acb->adapter_type) {
988 	case ACB_ADAPTER_TYPE_A: {
989 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
990 
991 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
992 		}
993 		break;
994 	case ACB_ADAPTER_TYPE_B: {
995 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
996 
997 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
998 		}
999 		break;
1000 	case ACB_ADAPTER_TYPE_C: {
1001 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1002 
1003 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1004 		}
1005 		break;
1006 	}
1007 	return(qbuffer);
1008 }
1009 /*
1010 **************************************************************************
1011 **************************************************************************
1012 */
1013 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1014 {
1015 	switch (acb->adapter_type) {
1016 	case ACB_ADAPTER_TYPE_A: {
1017 			/* let IOP know data has been read */
1018 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1019 		}
1020 		break;
1021 	case ACB_ADAPTER_TYPE_B: {
1022 			/* let IOP know data has been read */
1023 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1024 		}
1025 		break;
1026 	case ACB_ADAPTER_TYPE_C: {
1027 			/* let IOP know data has been read */
1028 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1029 		}
1030 	}
1031 	return;
1032 }
1033 /*
1034 **************************************************************************
1035 **************************************************************************
1036 */
1037 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1038 {
1039 	switch (acb->adapter_type) {
1040 	case ACB_ADAPTER_TYPE_A: {
1041 			/*
1042 			** ring the inbound doorbell to tell the iop the driver data write is done,
1043 			** then wait for the reply at the next hwinterrupt before posting the next Qbuffer
1044 			*/
1045 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1046 		}
1047 		break;
1048 	case ACB_ADAPTER_TYPE_B: {
1049 			/*
1050 			** ring the inbound doorbell to tell the iop the driver data write is done,
1051 			** then wait for the reply at the next hwinterrupt before posting the next Qbuffer
1052 			*/
1053 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1054 		}
1055 		break;
1056 	case ACB_ADAPTER_TYPE_C: {
1057 			/*
1058 			** ring the inbound doorbell to tell the iop the driver data write is done,
1059 			** then wait for the reply at the next hwinterrupt before posting the next Qbuffer
1060 			*/
1061 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1062 		}
1063 		break;
1064 	}
1065 }
1066 /*
1067 **********************************************************************
1068 **********************************************************************
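** arcmsr_post_ioctldata2iop: copy up to 124 bytes of pending ioctl write data
** from the driver's ring buffer into the IOP's message write buffer and ring
** the doorbell so the firmware picks it up.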
1069 */
1070 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1071 {
1072 	u_int8_t *pQbuffer;
1073 	struct QBUFFER *pwbuffer;
1074 	u_int8_t * iop_data;
1075 	int32_t allxfer_len=0;
1076 
1077 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1078 	iop_data=(u_int8_t *)pwbuffer->data;
1079 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1080 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1081 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1082 			&& (allxfer_len<124)) {
1083 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1084 			memcpy(iop_data, pQbuffer, 1);
1085 			acb->wqbuf_firstindex++;
1086 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1087 			iop_data++;
1088 			allxfer_len++;
1089 		}
1090 		pwbuffer->data_len=allxfer_len;
1091 		/*
1092 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1093 		*/
1094 		arcmsr_iop_message_wrote(acb);
1095 	}
1096 	return;
1097 }
1098 /*
1099 ************************************************************************
1100 ************************************************************************
1101 */
1102 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1103 {
1104 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1105 	CHIP_REG_WRITE32(HBA_MessageUnit,
1106 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1107 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1108 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1109 			, acb->pci_unit);
1110 	}
1111 	return;
1112 }
1113 /*
1114 ************************************************************************
1115 ************************************************************************
1116 */
1117 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1118 {
1119 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1120 	CHIP_REG_WRITE32(HBB_DOORBELL,
1121 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1122 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1123 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1124 			, acb->pci_unit);
1125 	}
1126 	return;
1127 }
1128 /*
1129 ************************************************************************
1130 ************************************************************************
1131 */
1132 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1133 {
1134 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1135 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1136 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1137 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1138 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1139 	}
1140 	return;
1141 }
1142 /*
1143 ************************************************************************
1144 ************************************************************************
1145 */
1146 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1147 {
1148 	switch (acb->adapter_type) {
1149 	case ACB_ADAPTER_TYPE_A: {
1150 			arcmsr_stop_hba_bgrb(acb);
1151 		}
1152 		break;
1153 	case ACB_ADAPTER_TYPE_B: {
1154 			arcmsr_stop_hbb_bgrb(acb);
1155 		}
1156 		break;
1157 	case ACB_ADAPTER_TYPE_C: {
1158 			arcmsr_stop_hbc_bgrb(acb);
1159 		}
1160 		break;
1161 	}
1162 	return;
1163 }
1164 /*
1165 ************************************************************************
1166 ************************************************************************
1167 */
1168 static void arcmsr_poll(struct cam_sim * psim)
1169 {
1170 	struct AdapterControlBlock *acb;
1171 
1172 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1173 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1174 	arcmsr_interrupt(acb);
1175 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1176 	return;
1177 }
1178 /*
1179 **************************************************************************
1180 **************************************************************************
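** arcmsr_iop2drv_data_wrote_handle: the IOP has posted data for the driver;
** copy it into the receive ring buffer if there is room, otherwise set
** ACB_F_IOPDATA_OVERFLOW and leave it in the IOP buffer for later.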
1181 */
1182 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1183 {
1184 	struct QBUFFER *prbuffer;
1185 	u_int8_t *pQbuffer;
1186 	u_int8_t *iop_data;
1187 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1188 
1189 	/*check this iop data if overflow my rqbuffer*/
1190 	rqbuf_lastindex=acb->rqbuf_lastindex;
1191 	rqbuf_firstindex=acb->rqbuf_firstindex;
1192 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1193 	iop_data=(u_int8_t *)prbuffer->data;
1194 	iop_len=prbuffer->data_len;
1195 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1196 	if(my_empty_len>=iop_len) {
1197 		while(iop_len > 0) {
1198 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1199 			memcpy(pQbuffer, iop_data, 1);
1200 			rqbuf_lastindex++;
1201 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1202 			iop_data++;
1203 			iop_len--;
1204 		}
1205 		acb->rqbuf_lastindex=rqbuf_lastindex;
1206 		arcmsr_iop_message_read(acb);
1207 		/*signature, let IOP know data has been read */
1208 	} else {
1209 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1210 	}
1211 	return;
1212 }
1213 /*
1214 **************************************************************************
1215 **************************************************************************
1216 */
1217 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1218 {
1219 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1220 	/*
1221 	*****************************************************************
1222 	**   check if the user space program has queued any mail packages
1223 	**   in my post bag; now is the time to send them to Areca's firmware
1224 	*****************************************************************
1225 	*/
1226 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1227 		u_int8_t *pQbuffer;
1228 		struct QBUFFER *pwbuffer;
1229 		u_int8_t *iop_data;
1230 		int allxfer_len=0;
1231 
1232 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1233 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1234 		iop_data=(u_int8_t *)pwbuffer->data;
1235 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1236 			&& (allxfer_len<124)) {
1237 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1238 			memcpy(iop_data, pQbuffer, 1);
1239 			acb->wqbuf_firstindex++;
1240 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1241 			iop_data++;
1242 			allxfer_len++;
1243 		}
1244 		pwbuffer->data_len=allxfer_len;
1245 		/*
1246 		** ring the inbound doorbell to tell the iop the driver data write is done,
1247 		** then wait for the reply at the next hwinterrupt before posting the next Qbuffer
1248 		*/
1249 		arcmsr_iop_message_wrote(acb);
1250 	}
1251 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1252 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1253 	}
1254 	return;
1255 }
1256 
1257 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1258 {
1259 /*
1260 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1261 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1262 	else
1263 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1264 */
1265 	xpt_free_path(ccb->ccb_h.path);
1266 }
1267 
1268 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1269 {
1270 	struct cam_path     *path;
1271 	union ccb            ccb;
1272 
1273 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1274 		return;
1275 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1276 	bzero(&ccb, sizeof(union ccb));
1277 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1278 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1279 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1280 	ccb.crcn.flags = CAM_FLAG_NONE;
1281 	xpt_action(&ccb);
1282 	return;
1283 }
1284 
1285 
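/*
** arcmsr_abort_dr_ccbs: abort every outstanding SRB addressed to the given
** target/lun; called when the firmware's device map says a unit has departed.
*/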
1286 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1287 {
1288 	struct CommandControlBlock *srb;
1289 	u_int32_t intmask_org;
1290 	int i;
1291 
1292 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1293 	/* disable all outbound interrupts */
1294 	intmask_org = arcmsr_disable_allintr(acb);
1295 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1296 	{
1297 		srb = acb->psrb_pool[i];
1298 		if (srb->startdone == ARCMSR_SRB_START)
1299 		{
1300 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1301 			{
1302 				srb->startdone = ARCMSR_SRB_ABORTED;
1303 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1304 				arcmsr_srb_complete(srb, 1);
1305 			}
1306 		}
1307 	}
1308 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1309 	arcmsr_enable_allintr(acb, intmask_org);
1310 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1311 }
1312 
1313 
1314 /*
1315 **************************************************************************
1316 **************************************************************************
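** arcmsr_dr_handle: read the firmware's device map, diff it against the cached
** copy, and for every changed lun bit abort outstanding commands for departed
** units and trigger a CAM rescan for both arrivals and departures.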
1317 */
1318 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1319 	u_int32_t	devicemap;
1320 	u_int32_t	target, lun;
1321 	u_int32_t	deviceMapCurrent[4]={0};
1322 	u_int8_t	*pDevMap;
1323 
1324 	switch (acb->adapter_type) {
1325 	case ACB_ADAPTER_TYPE_A:
1326 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1327 		for (target = 0; target < 4; target++)
1328 		{
1329 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1330 			devicemap += 4;
1331 		}
1332 		break;
1333 
1334 	case ACB_ADAPTER_TYPE_B:
1335 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1336 		for (target = 0; target < 4; target++)
1337 		{
1338 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1339 			devicemap += 4;
1340 		}
1341 		break;
1342 
1343 	case ACB_ADAPTER_TYPE_C:
1344 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1345 		for (target = 0; target < 4; target++)
1346 		{
1347 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1348 			devicemap += 4;
1349 		}
1350 		break;
1351 	}
1352 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1353 	{
1354 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1355 	}
1356 	/*
1357 	** adapter posted CONFIG message
1358 	** copy the new map, note if there are differences with the current map
1359 	*/
1360 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1361 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1362 	{
1363 		if (*pDevMap != acb->device_map[target])
1364 		{
1365 			u_int8_t difference, bit_check;
1366 
1367 			difference = *pDevMap ^ acb->device_map[target];
1368 			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1369 			{
1370 				bit_check = (1 << lun);		/*check bit from 0....31*/
1371 				if(difference & bit_check)
1372 				{
1373 					if(acb->device_map[target] & bit_check)
1374 					{/* unit departed */
1375 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n", target, lun);
1376 						arcmsr_abort_dr_ccbs(acb, target, lun);
1377 						arcmsr_rescan_lun(acb, target, lun);
1378 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1379 					}
1380 					else
1381 					{/* unit arrived */
1382 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, ARRIVING!!!\n", target, lun);
1383 						arcmsr_rescan_lun(acb, target, lun);
1384 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1385 					}
1386 				}
1387 			}
1388 			/* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n", target, acb->device_map[target], target, *pDevMap); */
1389 			acb->device_map[target] = *pDevMap;
1390 		}
1391 		pDevMap++;
1392 	}
1393 }
1394 /*
1395 **************************************************************************
1396 **************************************************************************
1397 */
1398 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1399 	u_int32_t outbound_message;
1400 
1401 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1402 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1403 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1404 		arcmsr_dr_handle( acb );
1405 }
1406 /*
1407 **************************************************************************
1408 **************************************************************************
1409 */
1410 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1411 	u_int32_t outbound_message;
1412 
1413 	/* clear interrupts */
1414 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1415 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1416 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1417 		arcmsr_dr_handle( acb );
1418 }
1419 /*
1420 **************************************************************************
1421 **************************************************************************
1422 */
1423 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1424 	u_int32_t outbound_message;
1425 
1426 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1427 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1428 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1429 		arcmsr_dr_handle( acb );
1430 }
1431 /*
1432 **************************************************************************
1433 **************************************************************************
1434 */
1435 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1436 {
1437 	u_int32_t outbound_doorbell;
1438 
1439 	/*
1440 	*******************************************************************
1441 	**  Maybe we should check here whether wrqbuffer_lock is held.
1442 	**  DOORBELL: din! don!
1443 	**  check if there is any mail we need to pick up from the firmware
1444 	*******************************************************************
1445 	*/
1446 	outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit,
1447 		0, outbound_doorbell);
1448 	CHIP_REG_WRITE32(HBA_MessageUnit,
1449 		0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1450 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1451 		arcmsr_iop2drv_data_wrote_handle(acb);
1452 	}
1453 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1454 		arcmsr_iop2drv_data_read_handle(acb);
1455 	}
1456 	return;
1457 }
1458 /*
1459 **************************************************************************
1460 **************************************************************************
1461 */
1462 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1463 {
1464 	u_int32_t outbound_doorbell;
1465 
1466 	/*
1467 	*******************************************************************
1468 	**  Maybe we should check here whether wrqbuffer_lock is held.
1469 	**  DOORBELL: din! don!
1470 	**  Check if there is any mail that needs to be fetched from the firmware.
1471 	*******************************************************************
1472 	*/
1473 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1474 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1475 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1476 		arcmsr_iop2drv_data_wrote_handle(acb);
1477 	}
1478 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1479 		arcmsr_iop2drv_data_read_handle(acb);
1480 	}
1481 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1482 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1483 	}
1484 	return;
1485 }
1486 /*
1487 **************************************************************************
1488 **************************************************************************
1489 */
1490 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1491 {
1492 	u_int32_t flag_srb;
1493 	u_int16_t error;
1494 
1495 	/*
1496 	*****************************************************************************
1497 	**               areca cdb command done
1498 	*****************************************************************************
1499 	*/
1500 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1501 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1502 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1503 		0, outbound_queueport)) != 0xFFFFFFFF) {
1504 		/* check if command done with no error*/
1505 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1506 		arcmsr_drain_donequeue(acb, flag_srb, error);
1507 	}	/*drain reply FIFO*/
1508 	return;
1509 }
1510 /*
1511 **************************************************************************
1512 **************************************************************************
1513 */
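/*
** Type B adapters post completions into the in-memory done_qbuffer ring
** instead of a hardware FIFO: each nonzero entry is consumed, cleared and
** the index advanced modulo ARCMSR_MAX_HBB_POSTQUEUE; a zero entry marks the
** end of the pending completions.
*/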
1514 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1515 {
1516 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1517 	u_int32_t flag_srb;
1518 	int index;
1519 	u_int16_t error;
1520 
1521 	/*
1522 	*****************************************************************************
1523 	**               areca cdb command done
1524 	*****************************************************************************
1525 	*/
1526 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1527 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1528 	index=phbbmu->doneq_index;
1529 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1530 		phbbmu->done_qbuffer[index]=0;
1531 		index++;
1532 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1533 		phbbmu->doneq_index=index;
1534 		/* check if command done with no error*/
1535 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1536 		arcmsr_drain_donequeue(acb, flag_srb, error);
1537 	}	/*drain reply FIFO*/
1538 	return;
1539 }
1540 /*
1541 **************************************************************************
1542 **************************************************************************
1543 */
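/*
** Type C completions are read from outbound_queueport_low while the
** host_int_status post-queue bit stays set.  After
** ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the driver rings the inbound
** doorbell with the post-queue throttling bit and leaves the loop,
** presumably to bound the time spent in a single ISR pass.
*/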
1544 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1545 {
1546 	u_int32_t flag_srb,throttling=0;
1547 	u_int16_t error;
1548 
1549 	/*
1550 	*****************************************************************************
1551 	**               areca cdb command done
1552 	*****************************************************************************
1553 	*/
1554 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1555 
1556 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1557 
1558 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1559 		/* check if command done with no error*/
1560 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
1561 		arcmsr_drain_donequeue(acb, flag_srb, error);
1562 		if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1563 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1564 			break;
1565 		}
1566 		throttling++;
1567 	}	/*drain reply FIFO*/
1568 	return;
1569 }
1570 /*
1571 **********************************************************************
1572 **********************************************************************
1573 */
1574 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1575 {
1576 	u_int32_t outbound_intstatus;
1577 	/*
1578 	*********************************************
1579 	**   check outbound intstatus
1580 	*********************************************
1581 	*/
1582 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1583 	if(!outbound_intstatus) {
1584 		/*it must be a shared irq*/
1585 		return;
1586 	}
1587 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
1588 	/* MU doorbell interrupts*/
1589 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1590 		arcmsr_hba_doorbell_isr(acb);
1591 	}
1592 	/* MU post queue interrupts*/
1593 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1594 		arcmsr_hba_postqueue_isr(acb);
1595 	}
1596 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1597 		arcmsr_hba_message_isr(acb);
1598 	}
1599 	return;
1600 }
1601 /*
1602 **********************************************************************
1603 **********************************************************************
1604 */
1605 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1606 {
1607 	u_int32_t outbound_doorbell;
1608 	/*
1609 	*********************************************
1610 	**   check outbound intstatus
1611 	*********************************************
1612 	*/
1613 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1614 	if(!outbound_doorbell) {
1615 		/*it must be a shared irq*/
1616 		return;
1617 	}
1618 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1619 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1620 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1621 	/* MU ioctl transfer doorbell interrupts*/
1622 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1623 		arcmsr_iop2drv_data_wrote_handle(acb);
1624 	}
1625 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1626 		arcmsr_iop2drv_data_read_handle(acb);
1627 	}
1628 	/* MU post queue interrupts*/
1629 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1630 		arcmsr_hbb_postqueue_isr(acb);
1631 	}
1632 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1633 		arcmsr_hbb_message_isr(acb);
1634 	}
1635 	return;
1636 }
1637 /*
1638 **********************************************************************
1639 **********************************************************************
1640 */
1641 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1642 {
1643 	u_int32_t host_interrupt_status;
1644 	/*
1645 	*********************************************
1646 	**   check outbound intstatus
1647 	*********************************************
1648 	*/
1649 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1650 	if(!host_interrupt_status) {
1651 		/*it must be a shared irq*/
1652 		return;
1653 	}
1654 	/* MU doorbell interrupts*/
1655 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1656 		arcmsr_hbc_doorbell_isr(acb);
1657 	}
1658 	/* MU post queue interrupts*/
1659 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1660 		arcmsr_hbc_postqueue_isr(acb);
1661 	}
1662 	return;
1663 }
1664 /*
1665 ******************************************************************************
1666 ******************************************************************************
1667 */
1668 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1669 {
1670 	switch (acb->adapter_type) {
1671 	case ACB_ADAPTER_TYPE_A:
1672 		arcmsr_handle_hba_isr(acb);
1673 		break;
1674 	case ACB_ADAPTER_TYPE_B:
1675 		arcmsr_handle_hbb_isr(acb);
1676 		break;
1677 	case ACB_ADAPTER_TYPE_C:
1678 		arcmsr_handle_hbc_isr(acb);
1679 		break;
1680 	default:
1681 		kprintf("arcmsr%d: interrupt service,"
1682 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1683 		break;
1684 	}
1685 	return;
1686 }
1687 /*
1688 **********************************************************************
1689 **********************************************************************
1690 */
1691 static void arcmsr_intr_handler(void *arg)
1692 {
1693 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1694 
1695 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1696 	arcmsr_interrupt(acb);
1697 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1698 }
1699 /*
1700 ******************************************************************************
1701 ******************************************************************************
1702 */
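/*
** Periodic device-map poller: ask the IOP for its current device map with a
** GET_CONFIG message (register set depends on the adapter type) and re-arm
** the callout every 5 seconds until ACB_F_SCSISTOPADAPTER is set.  The reply
** comes back through the message ISRs above, which call arcmsr_dr_handle().
*/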
1703 static void	arcmsr_polling_devmap(void* arg)
1704 {
1705 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1706 	switch (acb->adapter_type) {
1707 	case ACB_ADAPTER_TYPE_A:
1708 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1709 		break;
1710 
1711 	case ACB_ADAPTER_TYPE_B:
1712 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1713 		break;
1714 
1715 	case ACB_ADAPTER_TYPE_C:
1716 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1717 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1718 		break;
1719 	}
1720 
1721 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1722 	{
1723 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* polling per 5 seconds */
1724 	}
1725 }
1726 
1727 /*
1728 *******************************************************************************
1729 **
1730 *******************************************************************************
1731 */
1732 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1733 {
1734 	u_int32_t intmask_org;
1735 
1736 	if(acb!=NULL) {
1737 		/* stop adapter background rebuild */
1738 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1739 			intmask_org = arcmsr_disable_allintr(acb);
1740 			arcmsr_stop_adapter_bgrb(acb);
1741 			arcmsr_flush_adapter_cache(acb);
1742 			arcmsr_enable_allintr(acb, intmask_org);
1743 		}
1744 	}
1745 }
1746 /*
1747 ***********************************************************************
1748 **
1749 ************************************************************************
1750 */
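/*
** Handler for the ARCMSR ioctl messages: arg carries a CMD_MESSAGE_FIELD and
** must start with the "ARCMSR" signature.  Everything below runs under
** qbuffer_lock; READ_RQBUFFER drains up to 1031 bytes from the circular
** rqbuffer (refilling it from the IOP if an overflow was pending),
** WRITE_WQBUFFER queues user data for the IOP, and the CLEAR_* codes reset
** the respective ring indices.
*/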
1751 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1752 {
1753 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1754 	u_int32_t retvalue=EINVAL;
1755 
1756 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1757 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1758 		return retvalue;
1759 	}
1760 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1761 	switch(ioctl_cmd) {
1762 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1763 			u_int8_t * pQbuffer;
1764 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1765 			u_int32_t allxfer_len=0;
1766 
1767 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1768 				&& (allxfer_len<1031)) {
1769 				/*copy READ QBUFFER to srb*/
1770 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1771 				memcpy(ptmpQbuffer, pQbuffer, 1);
1772 				acb->rqbuf_firstindex++;
1773 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1774 				/*if last index number set it to 0 */
1775 				ptmpQbuffer++;
1776 				allxfer_len++;
1777 			}
1778 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1779 				struct QBUFFER * prbuffer;
1780 				u_int8_t * iop_data;
1781 				u_int32_t iop_len;
1782 
1783 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1784 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1785 				iop_data=(u_int8_t *)prbuffer->data;
1786 				iop_len=(u_int32_t)prbuffer->data_len;
1787 				/*this IOP data cannot overflow the ring buffer again here, so just copy it*/
1788 				while(iop_len>0) {
1789 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1790 					memcpy(pQbuffer, iop_data, 1);
1791 					acb->rqbuf_lastindex++;
1792 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1793 					/*if last index number set it to 0 */
1794 					iop_data++;
1795 					iop_len--;
1796 				}
1797 				arcmsr_iop_message_read(acb);
1798 				/*signal the IOP that the data has been read*/
1799 			}
1800 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1801 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1802 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1803 		}
1804 		break;
1805 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1806 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1807 			u_int8_t * pQbuffer;
1808 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1809 
1810 			user_len=pcmdmessagefld->cmdmessage.Length;
1811 			/*check if data xfer length of this request will overflow my array qbuffer */
1812 			wqbuf_lastindex=acb->wqbuf_lastindex;
1813 			wqbuf_firstindex=acb->wqbuf_firstindex;
1814 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1815 				arcmsr_post_ioctldata2iop(acb);
1816 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1817 			} else {
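				/*
				** free space left in the circular wqbuffer; the mask
				** arithmetic assumes ARCMSR_MAX_QBUFFER is a power of two
				*/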
1818 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1819 				if(my_empty_len>=user_len) {
1820 					while(user_len>0) {
1821 						/*copy srb data to wqbuffer*/
1822 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1823 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1824 						acb->wqbuf_lastindex++;
1825 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1826 						/*if last index number set it to 0 */
1827 						ptmpuserbuffer++;
1828 						user_len--;
1829 					}
1830 					/*post first Qbuffer*/
1831 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1832 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1833 						arcmsr_post_ioctldata2iop(acb);
1834 					}
1835 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1836 				} else {
1837 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1838 				}
1839 			}
1840 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1841 		}
1842 		break;
1843 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1844 			u_int8_t * pQbuffer=acb->rqbuffer;
1845 
1846 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1847 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1848 				arcmsr_iop_message_read(acb);
1849 				/*signal the IOP that the data has been read*/
1850 			}
1851 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1852 			acb->rqbuf_firstindex=0;
1853 			acb->rqbuf_lastindex=0;
1854 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1855 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1856 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1857 		}
1858 		break;
1859 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1860 		{
1861 			u_int8_t * pQbuffer=acb->wqbuffer;
1862 
1863 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1864 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1865 				arcmsr_iop_message_read(acb);
1866 				/*signal the IOP that the data has been read*/
1867 			}
1868 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1869 			acb->wqbuf_firstindex=0;
1870 			acb->wqbuf_lastindex=0;
1871 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1872 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1873 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1874 		}
1875 		break;
1876 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1877 			u_int8_t * pQbuffer;
1878 
1879 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1880 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1881 				arcmsr_iop_message_read(acb);
1882 				/*signal the IOP that the data has been read*/
1883 			}
1884 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1885 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1886 					|ACB_F_MESSAGE_WQBUFFER_READ);
1887 			acb->rqbuf_firstindex=0;
1888 			acb->rqbuf_lastindex=0;
1889 			acb->wqbuf_firstindex=0;
1890 			acb->wqbuf_lastindex=0;
1891 			pQbuffer=acb->rqbuffer;
1892 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1893 			pQbuffer=acb->wqbuffer;
1894 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1895 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1896 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1897 		}
1898 		break;
1899 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1900 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1901 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1902 		}
1903 		break;
1904 	case ARCMSR_MESSAGE_SAY_HELLO: {
1905 			char *hello_string = "Hello! I am ARCMSR";
1906 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
1907 
1908 			/*
1909 			** memcpy() always returns its destination, so its return value
1910 			** cannot signal an error; copy the greeting and report success.
1911 			*/
1912 			memcpy(puserbuffer, hello_string, strlen(hello_string));
1913 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1914 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1915 		}
1916 		break;
1917 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1918 			arcmsr_iop_parking(acb);
1919 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1920 		}
1921 		break;
1922 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1923 			arcmsr_flush_adapter_cache(acb);
1924 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1925 		}
1926 		break;
1927 	}
1928 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1929 	return retvalue;
1930 }
1931 /*
1932 **************************************************************************
1933 **************************************************************************
1934 */
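/*
** Pull a free SRB from the circular srbworkingQ.  The start and done indices
** chase each other around ARCMSR_MAX_FREESRB_NUM entries; if advancing the
** start index would make it meet the done index the pool is exhausted and
** NULL is returned.
*/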
1935 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1936 {
1937 	struct CommandControlBlock *srb=NULL;
1938 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1939 
1940 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1941 	workingsrb_doneindex=acb->workingsrb_doneindex;
1942 	workingsrb_startindex=acb->workingsrb_startindex;
1943 	srb=acb->srbworkingQ[workingsrb_startindex];
1944 	workingsrb_startindex++;
1945 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
1946 	if(workingsrb_doneindex!=workingsrb_startindex) {
1947 		acb->workingsrb_startindex=workingsrb_startindex;
1948 	} else {
1949 		srb=NULL;
1950 	}
1951 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1952 	return(srb);
1953 }
1954 /*
1955 **************************************************************************
1956 **************************************************************************
1957 */
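/*
** SCSI pass-through of the ioctl message set, reached through the virtual
** target's READ/WRITE BUFFER commands.  CDB bytes 5..8 carry the Areca
** control code; the data must be a single virtual buffer no larger than
** struct CMD_MESSAGE_FIELD, otherwise ARCMSR_MESSAGE_FAIL is returned.
*/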
1958 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
1959 {
1960 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1961 	int retvalue = 0, transfer_len = 0;
1962 	char *buffer;
1963 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
1964 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
1965 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
1966 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
1967 					/* 4 bytes: Areca io control code */
1968 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1969 		buffer = pccb->csio.data_ptr;
1970 		transfer_len = pccb->csio.dxfer_len;
1971 	} else {
1972 		retvalue = ARCMSR_MESSAGE_FAIL;
1973 		goto message_out;
1974 	}
1975 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1976 		retvalue = ARCMSR_MESSAGE_FAIL;
1977 		goto message_out;
1978 	}
1979 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1980 	switch(controlcode) {
1981 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1982 			u_int8_t *pQbuffer;
1983 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1984 			int32_t allxfer_len = 0;
1985 
1986 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1987 				&& (allxfer_len < 1031)) {
1988 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1989 				memcpy(ptmpQbuffer, pQbuffer, 1);
1990 				acb->rqbuf_firstindex++;
1991 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1992 				ptmpQbuffer++;
1993 				allxfer_len++;
1994 			}
1995 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1996 				struct QBUFFER  *prbuffer;
1997 				u_int8_t  *iop_data;
1998 				int32_t iop_len;
1999 
2000 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2001 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2002 				iop_data = (u_int8_t *)prbuffer->data;
2003 				iop_len =(u_int32_t)prbuffer->data_len;
2004 				while (iop_len > 0) {
2005 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2006 					memcpy(pQbuffer, iop_data, 1);
2007 					acb->rqbuf_lastindex++;
2008 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2009 					iop_data++;
2010 					iop_len--;
2011 				}
2012 				arcmsr_iop_message_read(acb);
2013 			}
2014 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2015 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2016 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2017 		}
2018 		break;
2019 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2020 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2021 			u_int8_t *pQbuffer;
2022 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2023 
2024 			user_len = pcmdmessagefld->cmdmessage.Length;
2025 			wqbuf_lastindex = acb->wqbuf_lastindex;
2026 			wqbuf_firstindex = acb->wqbuf_firstindex;
2027 			if (wqbuf_lastindex != wqbuf_firstindex) {
2028 				arcmsr_post_ioctldata2iop(acb);
2029 				/* on error, report sense data */
2030 				if (&pccb->csio.sense_data) {
2031 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2032 					/* Valid,ErrorCode */
2033 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2034 					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2035 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2036 					/* AdditionalSenseLength */
2037 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2038 					/* AdditionalSenseCode */
2039 				}
2040 				retvalue = ARCMSR_MESSAGE_FAIL;
2041 			} else {
2042 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2043 						&(ARCMSR_MAX_QBUFFER - 1);
2044 				if (my_empty_len >= user_len) {
2045 					while (user_len > 0) {
2046 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2047 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2048 						acb->wqbuf_lastindex++;
2049 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2050 						ptmpuserbuffer++;
2051 						user_len--;
2052 					}
2053 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2054 						acb->acb_flags &=
2055 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2056 						arcmsr_post_ioctldata2iop(acb);
2057 					}
2058 				} else {
2059 					/* on error, report sense data */
2060 					if (&pccb->csio.sense_data) {
2061 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2062 						/* Valid,ErrorCode */
2063 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2064 						/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2065 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2066 						/* AdditionalSenseLength */
2067 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2068 						/* AdditionalSenseCode */
2069 					}
2070 					retvalue = ARCMSR_MESSAGE_FAIL;
2071 				}
2072 			}
2073 		}
2074 		break;
2075 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2076 			u_int8_t *pQbuffer = acb->rqbuffer;
2077 
2078 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2079 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2080 				arcmsr_iop_message_read(acb);
2081 			}
2082 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2083 			acb->rqbuf_firstindex = 0;
2084 			acb->rqbuf_lastindex = 0;
2085 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2086 			pcmdmessagefld->cmdmessage.ReturnCode =
2087 			ARCMSR_MESSAGE_RETURNCODE_OK;
2088 		}
2089 		break;
2090 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2091 			u_int8_t *pQbuffer = acb->wqbuffer;
2092 
2093 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2094 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2095 				arcmsr_iop_message_read(acb);
2096 			}
2097 			acb->acb_flags |=
2098 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2099 					ACB_F_MESSAGE_WQBUFFER_READ);
2100 			acb->wqbuf_firstindex = 0;
2101 			acb->wqbuf_lastindex = 0;
2102 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2103 			pcmdmessagefld->cmdmessage.ReturnCode =
2104 				ARCMSR_MESSAGE_RETURNCODE_OK;
2105 		}
2106 		break;
2107 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2108 			u_int8_t *pQbuffer;
2109 
2110 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2111 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2112 				arcmsr_iop_message_read(acb);
2113 			}
2114 			acb->acb_flags |=
2115 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2116 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2117 				| ACB_F_MESSAGE_WQBUFFER_READ);
2118 			acb->rqbuf_firstindex = 0;
2119 			acb->rqbuf_lastindex = 0;
2120 			acb->wqbuf_firstindex = 0;
2121 			acb->wqbuf_lastindex = 0;
2122 			pQbuffer = acb->rqbuffer;
2123 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2124 			pQbuffer = acb->wqbuffer;
2125 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2126 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2127 		}
2128 		break;
2129 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2130 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2131 		}
2132 		break;
2133 	case ARCMSR_MESSAGE_SAY_HELLO: {
2134 			int8_t * hello_string = "Hello! I am ARCMSR";
2135 
2136 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2137 				, (int16_t)strlen(hello_string));
2138 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2139 		}
2140 		break;
2141 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2142 		arcmsr_iop_parking(acb);
2143 		break;
2144 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2145 		arcmsr_flush_adapter_cache(acb);
2146 		break;
2147 	default:
2148 		retvalue = ARCMSR_MESSAGE_FAIL;
2149 	}
2150 message_out:
2151 	return retvalue;
2152 }
2153 /*
2154 *********************************************************************
2155 *********************************************************************
2156 */
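/*
** bus_dmamap_load() callback.  Validates the mapping (load error, segment
** count, pending bus reset, gone raid volume, CCB no longer in progress,
** queue depth) and either completes the SRB immediately with the matching
** CAM status or builds the ARCMSR command frame and posts it to the IOP.
*/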
2157 static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2158 {
2159 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2160 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2161 	union ccb * pccb;
2162 	int target, lun;
2163 
2164 	pccb=srb->pccb;
2165 	target=pccb->ccb_h.target_id;
2166 	lun=pccb->ccb_h.target_lun;
2167 	if(error != 0) {
2168 		if(error != EFBIG) {
2169 			kprintf("arcmsr%d: unexpected error %x"
2170 				" returned from 'bus_dmamap_load' \n"
2171 				, acb->pci_unit, error);
2172 		}
2173 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2174 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2175 		}
2176 		arcmsr_srb_complete(srb, 0);
2177 		return;
2178 	}
2179 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2180 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2181 		arcmsr_srb_complete(srb, 0);
2182 		return;
2183 	}
2184 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2185 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2186 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2187 		arcmsr_srb_complete(srb, 0);
2188 		return;
2189 	}
2190 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2191 		u_int8_t block_cmd;
2192 
2193 		block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f;
2194 		if(block_cmd==0x08 || block_cmd==0x0a) {
2195 			kprintf("arcmsr%d:block 'read/write' command "
2196 				"with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n"
2197 				, acb->pci_unit, block_cmd, target, lun);
2198 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2199 			arcmsr_srb_complete(srb, 0);
2200 			return;
2201 		}
2202 	}
2203 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2204 		if(nseg != 0) {
2205 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2206 		}
2207 		arcmsr_srb_complete(srb, 0);
2208 		return;
2209 	}
2210 	if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
2211 		xpt_freeze_simq(acb->psim, 1);
2212 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2213 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2214 		arcmsr_srb_complete(srb, 0);
2215 		return;
2216 	}
2217 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2218 	arcmsr_build_srb(srb, dm_segs, nseg);
2219 /*	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2220 		callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz) / 1000, arcmsr_srb_timeout, srb);
2221 */
2222 	arcmsr_post_srb(acb, srb);
2223 	return;
2224 }
2225 /*
2226 *****************************************************************************************
2227 *****************************************************************************************
2228 */
2229 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2230 {
2231 	struct CommandControlBlock *srb;
2232 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2233 	u_int32_t intmask_org;
2234 	int i=0;
2235 
2236 	acb->num_aborts++;
2237 	/*
2238 	***************************************************************************
2239 	** The upper layer issuing the abort command takes this lock just prior to calling us.
2240 	** First determine if we currently own this command.
2241 	** Start by searching the device queue. If not found
2242 	** at all, and the system wanted us to just abort the
2243 	** command return success.
2244 	***************************************************************************
2245 	*/
2246 	if(acb->srboutstandingcount!=0) {
2247 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2248 			srb=acb->psrb_pool[i];
2249 			if(srb->startdone==ARCMSR_SRB_START) {
2250 				if(srb->pccb==abortccb) {
2251 					srb->startdone=ARCMSR_SRB_ABORTED;
2252 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2253 						" outstanding command \n"
2254 						, acb->pci_unit, abortccb->ccb_h.target_id
2255 						, abortccb->ccb_h.target_lun, srb);
2256 					goto abort_outstanding_cmd;
2257 				}
2258 			}
2259 		}
2260 	}
2261 	return(FALSE);
2262 abort_outstanding_cmd:
2263 	/* disable all outbound interrupt */
2264 	intmask_org=arcmsr_disable_allintr(acb);
2265 	arcmsr_polling_srbdone(acb, srb);
2266 	/* enable outbound Post Queue, outbound doorbell Interrupt */
2267 	arcmsr_enable_allintr(acb, intmask_org);
2268 	return (TRUE);
2269 }
2270 /*
2271 ****************************************************************************
2272 ****************************************************************************
2273 */
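/*
** Bus reset: flag ACB_F_BUS_RESET so new commands are bounced with
** CAM_SCSI_BUS_RESET, drain outstanding SRBs by servicing the adapter for up
** to 400 * 25ms, then reset the IOP and clear the flag.
*/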
2274 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2275 {
2276 	int retry=0;
2277 
2278 	acb->num_resets++;
2279 	acb->acb_flags |=ACB_F_BUS_RESET;
2280 	while(acb->srboutstandingcount!=0 && retry < 400) {
2281 		arcmsr_interrupt(acb);
2282 		UDELAY(25000);
2283 		retry++;
2284 	}
2285 	arcmsr_iop_reset(acb);
2286 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2287 	return;
2288 }
2289 /*
2290 **************************************************************************
2291 **************************************************************************
2292 */
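/*
** Commands addressed to the virtual target (id 16) never reach a disk: lun 0
** answers INQUIRY locally as a T_PROCESSOR device and READ/WRITE BUFFER is
** routed to arcmsr_iop_message_xfer(); anything else is simply completed.
*/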
2293 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2294 		union ccb * pccb)
2295 {
2296 	pccb->ccb_h.status |= CAM_REQ_CMP;
2297 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2298 	case INQUIRY: {
2299 		unsigned char inqdata[36];
2300 		char *buffer=pccb->csio.data_ptr;
2301 
2302 		if (pccb->ccb_h.target_lun) {
2303 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2304 			xpt_done(pccb);
2305 			return;
2306 		}
2307 		inqdata[0] = T_PROCESSOR;
2308 		/* Periph Qualifier & Periph Dev Type */
2309 		inqdata[1] = 0;
2310 		/* rem media bit & Dev Type Modifier */
2311 		inqdata[2] = 0;
2312 		/* ISO, ECMA, & ANSI versions */
2313 		inqdata[4] = 31;
2314 		/* length of additional data */
2315 		strncpy(&inqdata[8], "Areca   ", 8);
2316 		/* Vendor Identification */
2317 		strncpy(&inqdata[16], "RAID controller ", 16);
2318 		/* Product Identification */
2319 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2320 		memcpy(buffer, inqdata, sizeof(inqdata));
2321 		xpt_done(pccb);
2322 	}
2323 	break;
2324 	case WRITE_BUFFER:
2325 	case READ_BUFFER: {
2326 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2327 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2328 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2329 		}
2330 		xpt_done(pccb);
2331 	}
2332 	break;
2333 	default:
2334 		xpt_done(pccb);
2335 	}
2336 }
2337 /*
2338 *********************************************************************
2339 *********************************************************************
2340 */
2341 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2342 {
2343 	struct AdapterControlBlock *  acb;
2344 
2345 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2346 	if(acb==NULL) {
2347 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2348 		xpt_done(pccb);
2349 		return;
2350 	}
2351 	switch (pccb->ccb_h.func_code) {
2352 	case XPT_SCSI_IO: {
2353 			struct CommandControlBlock *srb;
2354 			int target=pccb->ccb_h.target_id;
2355 
2356 			if(target == 16) {
2357 				/* virtual device for iop message transfer */
2358 				arcmsr_handle_virtual_command(acb, pccb);
2359 				return;
2360 			}
2361 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2362 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2363 				xpt_done(pccb);
2364 				return;
2365 			}
2366 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2367 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2368 			srb->pccb=pccb;
2369 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2370 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2371 					/* Single buffer */
2372 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2373 						/* Buffer is virtual */
2374 						u_int32_t error;
2375 
2376 						crit_enter();
2377 						error =	bus_dmamap_load(acb->dm_segs_dmat
2378 							, srb->dm_segs_dmamap
2379 							, pccb->csio.data_ptr
2380 							, pccb->csio.dxfer_len
2381 							, arcmsr_executesrb, srb, /*flags*/0);
2382 						if(error == EINPROGRESS) {
2383 							xpt_freeze_simq(acb->psim, 1);
2384 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2385 						}
2386 						crit_exit();
2387 					} else {
2388 						/* Buffer is physical */
2389 						panic("arcmsr: CAM_DATA_PHYS not supported");
2390 					}
2391 				} else {
2392 					/* Scatter/gather list */
2393 					struct bus_dma_segment *segs;
2394 
2395 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2396 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2397 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2398 						xpt_done(pccb);
2399 						kfree(srb, M_DEVBUF);
2400 						return;
2401 					}
2402 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2403 					arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0);
2404 				}
2405 			} else {
2406 				arcmsr_executesrb(srb, NULL, 0, 0);
2407 			}
2408 			break;
2409 		}
2410 	case XPT_TARGET_IO: {
2411 			/* target mode does not yet support vendor specific commands. */
2412 			pccb->ccb_h.status |= CAM_REQ_CMP;
2413 			xpt_done(pccb);
2414 			break;
2415 		}
2416 	case XPT_PATH_INQ: {
2417 			struct ccb_pathinq *cpi= &pccb->cpi;
2418 
2419 			cpi->version_num=1;
2420 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2421 			cpi->target_sprt=0;
2422 			cpi->hba_misc=0;
2423 			cpi->hba_eng_cnt=0;
2424 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2425 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2426 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2427 			cpi->bus_id=cam_sim_bus(psim);
2428 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2429 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2430 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2431 			cpi->unit_number=cam_sim_unit(psim);
2432 		#ifdef	CAM_NEW_TRAN_CODE
2433 			cpi->transport = XPORT_SPI;
2434 			cpi->transport_version = 2;
2435 			cpi->protocol = PROTO_SCSI;
2436 			cpi->protocol_version = SCSI_REV_2;
2437 		#endif
2438 			cpi->ccb_h.status |= CAM_REQ_CMP;
2439 			xpt_done(pccb);
2440 			break;
2441 		}
2442 	case XPT_ABORT: {
2443 			union ccb *pabort_ccb;
2444 
2445 			pabort_ccb=pccb->cab.abort_ccb;
2446 			switch (pabort_ccb->ccb_h.func_code) {
2447 			case XPT_ACCEPT_TARGET_IO:
2448 			case XPT_IMMED_NOTIFY:
2449 			case XPT_CONT_TARGET_IO:
2450 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2451 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2452 					xpt_done(pabort_ccb);
2453 					pccb->ccb_h.status |= CAM_REQ_CMP;
2454 				} else {
2455 					xpt_print_path(pabort_ccb->ccb_h.path);
2456 					kprintf("Not found\n");
2457 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2458 				}
2459 				break;
2460 			case XPT_SCSI_IO:
2461 				pccb->ccb_h.status |= CAM_UA_ABORT;
2462 				break;
2463 			default:
2464 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2465 				break;
2466 			}
2467 			xpt_done(pccb);
2468 			break;
2469 		}
2470 	case XPT_RESET_BUS:
2471 	case XPT_RESET_DEV: {
2472 			u_int32_t     i;
2473 
2474 			arcmsr_bus_reset(acb);
2475 			for (i=0; i < 500; i++) {
2476 				DELAY(1000);
2477 			}
2478 			pccb->ccb_h.status |= CAM_REQ_CMP;
2479 			xpt_done(pccb);
2480 			break;
2481 		}
2482 	case XPT_TERM_IO: {
2483 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2484 			xpt_done(pccb);
2485 			break;
2486 		}
2487 	case XPT_GET_TRAN_SETTINGS: {
2488 			struct ccb_trans_settings *cts;
2489 
2490 			if(pccb->ccb_h.target_id == 16) {
2491 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2492 				xpt_done(pccb);
2493 				break;
2494 			}
2495 			cts= &pccb->cts;
2496 		#ifdef	CAM_NEW_TRAN_CODE
2497 			{
2498 				struct ccb_trans_settings_scsi *scsi;
2499 				struct ccb_trans_settings_spi *spi;
2500 
2501 				scsi = &cts->proto_specific.scsi;
2502 				spi = &cts->xport_specific.spi;
2503 				cts->protocol = PROTO_SCSI;
2504 				cts->protocol_version = SCSI_REV_2;
2505 				cts->transport = XPORT_SPI;
2506 				cts->transport_version = 2;
2507 				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2508 				spi->sync_period=3;
2509 				spi->sync_offset=32;
2510 				spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2511 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2512 				spi->valid = CTS_SPI_VALID_DISC
2513 					| CTS_SPI_VALID_SYNC_RATE
2514 					| CTS_SPI_VALID_SYNC_OFFSET
2515 					| CTS_SPI_VALID_BUS_WIDTH;
2516 				scsi->valid = CTS_SCSI_VALID_TQ;
2517 			}
2518 		#else
2519 			{
2520 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2521 				cts->sync_period=3;
2522 				cts->sync_offset=32;
2523 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2524 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2525 				CCB_TRANS_SYNC_OFFSET_VALID |
2526 				CCB_TRANS_BUS_WIDTH_VALID |
2527 				CCB_TRANS_DISC_VALID |
2528 				CCB_TRANS_TQ_VALID;
2529 			}
2530 		#endif
2531 			pccb->ccb_h.status |= CAM_REQ_CMP;
2532 			xpt_done(pccb);
2533 			break;
2534 		}
2535 	case XPT_SET_TRAN_SETTINGS: {
2536 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2537 			xpt_done(pccb);
2538 			break;
2539 		}
2540 	case XPT_CALC_GEOMETRY: {
2541 			struct ccb_calc_geometry *ccg;
2542 			u_int32_t size_mb;
2543 			u_int32_t secs_per_cylinder;
2544 
2545 			if(pccb->ccb_h.target_id == 16) {
2546 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2547 				xpt_done(pccb);
2548 				break;
2549 			}
2550 			ccg= &pccb->ccg;
2551 			if (ccg->block_size == 0) {
2552 				pccb->ccb_h.status = CAM_REQ_INVALID;
2553 				xpt_done(pccb);
2554 				break;
2555 			}
2556 			if(((1024L * 1024L)/ccg->block_size) == 0) { /* avoid dividing by zero below */
2557 				pccb->ccb_h.status = CAM_REQ_INVALID;
2558 				xpt_done(pccb);
2559 				break;
2560 			}
2561 			size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
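			/*
			** fake a drive geometry from the volume size: volumes larger than
			** 1GB get 255 heads and 63 sectors per track, smaller ones 64 and 32
			*/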
2562 			if(size_mb > 1024 ) {
2563 				ccg->heads=255;
2564 				ccg->secs_per_track=63;
2565 			} else {
2566 				ccg->heads=64;
2567 				ccg->secs_per_track=32;
2568 			}
2569 			secs_per_cylinder=ccg->heads * ccg->secs_per_track;
2570 			ccg->cylinders=ccg->volume_size / secs_per_cylinder;
2571 			pccb->ccb_h.status |= CAM_REQ_CMP;
2572 			xpt_done(pccb);
2573 			break;
2574 		}
2575 	default:
2576 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2577 		xpt_done(pccb);
2578 		break;
2579 	}
2580 	return;
2581 }
2582 /*
2583 **********************************************************************
2584 **********************************************************************
2585 */
2586 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2587 {
2588 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2589 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2590 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2591 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2592 	}
2593 	return;
2594 }
2595 /*
2596 **********************************************************************
2597 **********************************************************************
2598 */
2599 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2600 {
2601 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2602 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2603 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2604 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2605 	}
2606 	return;
2607 }
2608 /*
2609 **********************************************************************
2610 **********************************************************************
2611 */
2612 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2613 {
2614 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2615 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2616 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2617 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2618 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2619 	}
2620 	return;
2621 }
2622 /*
2623 **********************************************************************
2624 **********************************************************************
2625 */
2626 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2627 {
2628 	switch (acb->adapter_type) {
2629 	case ACB_ADAPTER_TYPE_A:
2630 		arcmsr_start_hba_bgrb(acb);
2631 		break;
2632 	case ACB_ADAPTER_TYPE_B:
2633 		arcmsr_start_hbb_bgrb(acb);
2634 		break;
2635 	case ACB_ADAPTER_TYPE_C:
2636 		arcmsr_start_hbc_bgrb(acb);
2637 		break;
2638 	}
2639 	return;
2640 }
2641 /*
2642 **********************************************************************
2643 **
2644 **********************************************************************
2645 */
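/*
** Polled completion path, used while interrupts are disabled (for example
** while aborting an SRB).  Each variant drains its adapter's reply queue and
** reports every completed SRB; when nothing is pending it waits 25ms and
** retries, giving up after roughly 100 polls if the SRB being waited for
** never completes.
*/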
2646 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2647 {
2648 	struct CommandControlBlock *srb;
2649 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2650 	u_int16_t	error;
2651 
2652 polling_ccb_retry:
2653 	poll_count++;
2654 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2655 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2656 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2657 	while(1) {
2658 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2659 			0, outbound_queueport))==0xFFFFFFFF) {
2660 			if(poll_srb_done) {
2661 				break;/*chip FIFO no ccb for completion already*/
2662 			} else {
2663 				UDELAY(25000);
2664 				if ((poll_count > 100) && (poll_srb != NULL)) {
2665 					break;
2666 				}
2667 				goto polling_ccb_retry;
2668 			}
2669 		}
2670 		/* check if command done with no error*/
2671 		srb=(struct CommandControlBlock *)
2672 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2673 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2674 		poll_srb_done = (srb==poll_srb) ? 1:0;
2675 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2676 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2677 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2678 					" poll command abort successfully \n"
2679 					, acb->pci_unit
2680 					, srb->pccb->ccb_h.target_id
2681 					, srb->pccb->ccb_h.target_lun, srb);
2682 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2683 				arcmsr_srb_complete(srb, 1);
2684 				continue;
2685 			}
2686 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2687 				" srboutstandingcount=%d \n"
2688 				, acb->pci_unit
2689 				, srb, acb->srboutstandingcount);
2690 			continue;
2691 		}
2692 		arcmsr_report_srb_state(acb, srb, error);
2693 	}	/*drain reply FIFO*/
2694 	return;
2695 }
2696 /*
2697 **********************************************************************
2698 **
2699 **********************************************************************
2700 */
2701 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2702 {
2703 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2704 	struct CommandControlBlock *srb;
2705 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2706 	int index;
2707 	u_int16_t	error;
2708 
2709 polling_ccb_retry:
2710 	poll_count++;
2711 	CHIP_REG_WRITE32(HBB_DOORBELL,
2712 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2713 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2714 	while(1) {
2715 		index=phbbmu->doneq_index;
2716 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2717 			if(poll_srb_done) {
2718 				break;/*chip FIFO no ccb for completion already*/
2719 			} else {
2720 				UDELAY(25000);
2721 				if ((poll_count > 100) && (poll_srb != NULL)) {
2722 					break;
2723 				}
2724 				goto polling_ccb_retry;
2725 			}
2726 		}
2727 		phbbmu->done_qbuffer[index]=0;
2728 		index++;
2729 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2730 		phbbmu->doneq_index=index;
2731 		/* check if command done with no error*/
2732 		srb=(struct CommandControlBlock *)
2733 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2734 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2735 		poll_srb_done = (srb==poll_srb) ? 1:0;
2736 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2737 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2738 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2739 					" poll command abort successfully \n"
2740 					, acb->pci_unit
2741 					, srb->pccb->ccb_h.target_id
2742 					, srb->pccb->ccb_h.target_lun, srb);
2743 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2744 				arcmsr_srb_complete(srb, 1);
2745 				continue;
2746 			}
2747 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2748 				" srboutstandingcount=%d \n"
2749 				, acb->pci_unit
2750 				, srb, acb->srboutstandingcount);
2751 			continue;
2752 		}
2753 		arcmsr_report_srb_state(acb, srb, error);
2754 	}	/*drain reply FIFO*/
2755 	return;
2756 }
2757 /*
2758 **********************************************************************
2759 **
2760 **********************************************************************
2761 */
2762 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2763 {
2764 	struct CommandControlBlock *srb;
2765 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2766 	u_int16_t	error;
2767 
2768 polling_ccb_retry:
2769 	poll_count++;
2770 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2771 	while(1) {
2772 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2773 			if(poll_srb_done) {
2774 				break;/*chip FIFO no ccb for completion already*/
2775 			} else {
2776 				UDELAY(25000);
2777 				if ((poll_count > 100) && (poll_srb != NULL)) {
2778 					break;
2779 				}
2780 				if (acb->srboutstandingcount == 0) {
2781 					break;
2782 				}
2783 				goto polling_ccb_retry;
2784 			}
2785 		}
2786 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2787 		/* check if command done with no error*/
2788 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/
2789 		error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2790 		if (poll_srb != NULL)
2791 			poll_srb_done = (srb==poll_srb) ? 1:0;
2792 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2793 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2794 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' poll command abort successfully \n"
2795 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2796 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2797 				arcmsr_srb_complete(srb, 1);
2798 				continue;
2799 			}
2800 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p' srboutstandingcount=%d \n"
2801 					, acb->pci_unit, srb, acb->srboutstandingcount);
2802 			continue;
2803 		}
2804 		arcmsr_report_srb_state(acb, srb, error);
2805 	}	/*drain reply FIFO*/
2806 	return;
2807 }
2808 /*
2809 **********************************************************************
2810 **********************************************************************
2811 */
2812 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2813 {
2814 	switch (acb->adapter_type) {
2815 	case ACB_ADAPTER_TYPE_A: {
2816 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2817 		}
2818 		break;
2819 	case ACB_ADAPTER_TYPE_B: {
2820 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2821 		}
2822 		break;
2823 	case ACB_ADAPTER_TYPE_C: {
2824 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2825 		}
2826 		break;
2827 	}
2828 }
2829 /*
2830 **********************************************************************
2831 **********************************************************************
2832 */
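/*
** The *_get_*_config() routines below send a GET_CONFIG message and then
** copy the firmware model (8 bytes), version (16 bytes) and device map
** (16 bytes) out of the message rwbuffer, followed by the request length,
** queue depth, SDRAM size, channel count and config version words.
*/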
2833 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2834 {
2835 	char *acb_firm_model=acb->firm_model;
2836 	char *acb_firm_version=acb->firm_version;
2837 	char *acb_device_map = acb->device_map;
2838 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2839 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2840 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2841 	int i;
2842 
2843 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2844 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2845 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2846 	}
2847 	i=0;
2848 	while(i<8) {
2849 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2850 		/* 8 bytes firm_model, 15, 60-67*/
2851 		acb_firm_model++;
2852 		i++;
2853 	}
2854 	i=0;
2855 	while(i<16) {
2856 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2857 		/* 16 bytes firm_version, 17, 68-83*/
2858 		acb_firm_version++;
2859 		i++;
2860 	}
2861 	i=0;
2862 	while(i<16) {
2863 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2864 		acb_device_map++;
2865 		i++;
2866 	}
2867 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2868 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2869 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2870 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2871 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2872 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2873 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2874 	return;
2875 }
2876 /*
2877 **********************************************************************
2878 **********************************************************************
2879 */
2880 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2881 {
2882 	char *acb_firm_model=acb->firm_model;
2883 	char *acb_firm_version=acb->firm_version;
2884 	char *acb_device_map = acb->device_map;
2885 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2886 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2887 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2888 	int i;
2889 
2890 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2891 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2892 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2893 	}
2894 	i=0;
2895 	while(i<8) {
2896 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2897 		/* 8 bytes firm_model, 15, 60-67*/
2898 		acb_firm_model++;
2899 		i++;
2900 	}
2901 	i=0;
2902 	while(i<16) {
2903 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2904 		/* 16 bytes firm_version, 17, 68-83*/
2905 		acb_firm_version++;
2906 		i++;
2907 	}
2908 	i=0;
2909 	while(i<16) {
2910 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2911 		acb_device_map++;
2912 		i++;
2913 	}
2914 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2915 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2916 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2917 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2918 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2919 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2920 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2921 	return;
2922 }
2923 /*
2924 **********************************************************************
2925 **********************************************************************
2926 */
2927 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2928 {
2929 	char *acb_firm_model=acb->firm_model;
2930 	char *acb_firm_version=acb->firm_version;
2931 	char *acb_device_map = acb->device_map;
2932 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
2933 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2934 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2935 	int i;
2936 
2937 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2938 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2939 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2940 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2941 	}
2942 	i=0;
2943 	while(i<8) {
2944 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2945 		/* 8 bytes firm_model, 15, 60-67*/
2946 		acb_firm_model++;
2947 		i++;
2948 	}
2949 	i=0;
2950 	while(i<16) {
2951 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2952 		/* 16 bytes firm_version, 17, 68-83*/
2953 		acb_firm_version++;
2954 		i++;
2955 	}
2956 	i=0;
2957 	while(i<16) {
2958 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2959 		acb_device_map++;
2960 		i++;
2961 	}
2962 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2963 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2964 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
2965 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
2966 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
2967 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
2968 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2969 	return;
2970 }
2971 /*
2972 **********************************************************************
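** Dispatch to the adapter-type specific firmware configuration reader.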
2973 **********************************************************************
2974 */
2975 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2976 {
2977 	switch (acb->adapter_type) {
2978 	case ACB_ADAPTER_TYPE_A: {
2979 			arcmsr_get_hba_config(acb);
2980 		}
2981 		break;
2982 	case ACB_ADAPTER_TYPE_B: {
2983 			arcmsr_get_hbb_config(acb);
2984 		}
2985 		break;
2986 	case ACB_ADAPTER_TYPE_C: {
2987 			arcmsr_get_hbc_config(acb);
2988 		}
2989 		break;
2990 	}
2991 	return;
2992 }
2993 /*
2994 **********************************************************************
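** Poll the adapter until firmware signals ready, giving up after
** roughly 30 seconds (2000 polls of 15 milliseconds each).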
2995 **********************************************************************
2996 */
2997 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
2998 {
2999 	int	timeout=0;
3000 
3001 	switch (acb->adapter_type) {
3002 	case ACB_ADAPTER_TYPE_A: {
3003 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3004 			{
3005 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3006 				{
3007 					kprintf("arcmsr%d: timed out waiting for firmware\n", acb->pci_unit);
3008 					return;
3009 				}
3010 				UDELAY(15000); /* wait 15 milli-seconds */
3011 			}
3012 		}
3013 		break;
3014 	case ACB_ADAPTER_TYPE_B: {
3015 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3016 			{
3017 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3018 				{
3019 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3020 					return;
3021 				}
3022 				UDELAY(15000); /* wait 15 milli-seconds */
3023 			}
3024 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3025 		}
3026 		break;
3027 	case ACB_ADAPTER_TYPE_C: {
3028 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3029 			{
3030 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3031 				{
3032 					kprintf("arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3033 					return;
3034 				}
3035 				UDELAY(15000); /* wait 15 milli-seconds */
3036 			}
3037 		}
3038 		break;
3039 	}
3040 	return;
3041 }
3042 /*
3043 **********************************************************************
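** Drain any pending doorbell interrupt and acknowledge to the IOP
** that the driver has read its data.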
3044 **********************************************************************
3045 */
3046 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3047 {
3048 	u_int32_t outbound_doorbell;
3049 
3050 	switch (acb->adapter_type) {
3051 	case ACB_ADAPTER_TYPE_A: {
3052 			/* empty doorbell Qbuffer if the doorbell has been rung */
3053 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3054 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3055 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3056 
3057 		}
3058 		break;
3059 	case ACB_ADAPTER_TYPE_B: {
3060 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3061 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3062 			/* let IOP know data has been read */
3063 		}
3064 		break;
3065 	case ACB_ADAPTER_TYPE_C: {
3066 			/* empty doorbell Qbuffer if the doorbell has been rung */
3067 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3068 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3069 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3070 
3071 		}
3072 		break;
3073 	}
3074 	return;
3075 }
3076 /*
3077 ************************************************************************
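** Tell the IOP the high 32 bits of the SRB pool physical address
** (when non-zero) and, for type B adapters, set up the post/done
** command queue window.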
3078 ************************************************************************
3079 */
3080 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3081 {
3082 	unsigned long srb_phyaddr;
3083 	u_int32_t srb_phyaddr_hi32;
3084 
3085 	/*
3086 	********************************************************************
3087 	** here we need to tell iop 331 our freesrb.HighPart
3088 	** if freesrb.HighPart is not zero
3089 	********************************************************************
3090 	*/
3091 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3092 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3093 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3094 	switch (acb->adapter_type) {
3095 	case ACB_ADAPTER_TYPE_A: {
3096 			if(srb_phyaddr_hi32!=0) {
3097 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3098 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3099 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3100 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3101 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3102 					return FALSE;
3103 				}
3104 			}
3105 		}
3106 		break;
3107 		/*
3108 		***********************************************************************
3109 		**    if adapter type B, set window of "post command Q"
3110 		***********************************************************************
3111 		*/
3112 	case ACB_ADAPTER_TYPE_B: {
3113 			u_int32_t post_queue_phyaddr;
3114 			struct HBB_MessageUnit *phbbmu;
3115 
3116 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3117 			phbbmu->postq_index=0;
3118 			phbbmu->doneq_index=0;
3119 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3120 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3121 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3122 				return FALSE;
3123 			}
3124 			post_queue_phyaddr = srb_phyaddr + ARCMSR_MAX_FREESRB_NUM*sizeof(struct CommandControlBlock)
3125 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3126 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3127 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */
3128 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
3129 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
3130 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3131 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3132 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3133 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3134 				return FALSE;
3135 			}
3136 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3137 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3138 				kprintf("arcmsr%d: 'start driver mode' timeout\n", acb->pci_unit);
3139 				return FALSE;
3140 			}
3141 		}
3142 		break;
3143 	case ACB_ADAPTER_TYPE_C: {
3144 			if(srb_phyaddr_hi32!=0) {
3145 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3146 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3147 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3148 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3149 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3150 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3151 					return FALSE;
3152 				}
3153 			}
3154 		}
3155 		break;
3156 	}
3157 	return TRUE;
3158 }
3159 /*
3160 ************************************************************************
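** Enable end-of-interrupt mode; only type B (HBB) adapters need this message.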
3161 ************************************************************************
3162 */
3163 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3164 {
3165 	switch (acb->adapter_type)
3166 	{
3167 	case ACB_ADAPTER_TYPE_A:
3168 	case ACB_ADAPTER_TYPE_C:
3169 		break;
3170 	case ACB_ADAPTER_TYPE_B: {
3171 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3172 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3173 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3174 
3175 				return;
3176 			}
3177 		}
3178 		break;
3179 	}
3180 	return;
3181 }
3182 /*
3183 **********************************************************************
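** Bring the IOP online: wait for firmware, confirm the SRB and queue
** addresses, read the firmware spec, start background rebuild, drain
** the doorbell queue and re-enable outbound interrupts.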
3184 **********************************************************************
3185 */
3186 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3187 {
3188 	u_int32_t intmask_org;
3189 
3190 	/* disable all outbound interrupt */
3191 	intmask_org=arcmsr_disable_allintr(acb);
3192 	arcmsr_wait_firmware_ready(acb);
3193 	arcmsr_iop_confirm(acb);
3194 	arcmsr_get_firmware_spec(acb);
3195 	/*start background rebuild*/
3196 	arcmsr_start_adapter_bgrb(acb);
3197 	/* empty doorbell Qbuffer if door bell ringed */
3198 	arcmsr_clear_doorbell_queue_buffer(acb);
3199 	arcmsr_enable_eoi_mode(acb);
3200 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3201 	arcmsr_enable_allintr(acb, intmask_org);
3202 	acb->acb_flags |=ACB_F_IOP_INITED;
3203 	return;
3204 }
3205 /*
3206 **********************************************************************
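** bus_dmamap_load callback: record the SRB pool physical address,
** create a DMA map for each SRB and compute the driver's
** virtual-to-physical address offset.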
3207 **********************************************************************
3208 */
3209 static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3210 {
3211 	struct AdapterControlBlock *acb=arg;
3212 	struct CommandControlBlock *srb_tmp;
3213 	u_int8_t * dma_memptr;
3214 	u_int32_t i;
3215 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3216 
3217 	dma_memptr=acb->uncacheptr;
3218 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3219 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3220 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3221 		if(bus_dmamap_create(acb->dm_segs_dmat,
3222 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3223 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3224 			kprintf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3226 			return;
3227 		}
3228 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3229 		srb_tmp->acb=acb;
3230 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3231 		srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
3232 		srb_tmp++;
3233 	}
3234 	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
3235 	return;
3236 }
3237 /*
3238 ************************************************************************
3239 **
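** Destroy the control device node and tear down the SRB DMA maps and tags.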
3240 **
3241 ************************************************************************
3242 */
3243 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3244 {
3245 	/* remove the control device */
3246 	if(acb->ioctl_dev != NULL) {
3247 		destroy_dev(acb->ioctl_dev);
3248 	}
3249 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3250 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3251 	bus_dma_tag_destroy(acb->srb_dmat);
3252 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3253 	bus_dma_tag_destroy(acb->parent_dmat);
3254 	return;
3255 }
3256 /*
3257 ************************************************************************
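** Identify the adapter type from its PCI device ID, create the DMA
** tags, allocate and map the SRB pool, map the register BAR(s) and
** initialize the IOP.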
3258 ************************************************************************
3259 */
3260 static u_int32_t arcmsr_initialize(device_t dev)
3261 {
3262 	struct AdapterControlBlock *acb=device_get_softc(dev);
3263 	u_int16_t pci_command;
3264 	int i, j,max_coherent_size;
3265 
3266 	switch (pci_get_devid(dev)) {
3267 	case PCIDevVenIDARC1880: {
3268 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3269 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3270 		}
3271 		break;
3272 	case PCIDevVenIDARC1201: {
3273 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3274 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3275 		}
3276 		break;
3277 	case PCIDevVenIDARC1110:
3278 	case PCIDevVenIDARC1120:
3279 	case PCIDevVenIDARC1130:
3280 	case PCIDevVenIDARC1160:
3281 	case PCIDevVenIDARC1170:
3282 	case PCIDevVenIDARC1210:
3283 	case PCIDevVenIDARC1220:
3284 	case PCIDevVenIDARC1230:
3285 	case PCIDevVenIDARC1260:
3286 	case PCIDevVenIDARC1270:
3287 	case PCIDevVenIDARC1280:
3288 	case PCIDevVenIDARC1212:
3289 	case PCIDevVenIDARC1222:
3290 	case PCIDevVenIDARC1380:
3291 	case PCIDevVenIDARC1381:
3292 	case PCIDevVenIDARC1680:
3293 	case PCIDevVenIDARC1681: {
3294 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3295 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3296 		}
3297 		break;
3298 	default: {
3299 			kprintf("arcmsr%d: unknown RAID adapter type\n", device_get_unit(dev));
3301 			return ENOMEM;
3302 		}
3303 	}
3304 	if(bus_dma_tag_create(  /*parent*/	NULL,
3305 				/*alignment*/	1,
3306 				/*boundary*/	0,
3307 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3308 				/*highaddr*/	BUS_SPACE_MAXADDR,
3309 				/*filter*/	NULL,
3310 				/*filterarg*/	NULL,
3311 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3312 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3313 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3314 				/*flags*/	0,
3315 						&acb->parent_dmat) != 0)
3316 	{
3317 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3318 		return ENOMEM;
3319 	}
3320 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3321 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3322 				/*alignment*/	1,
3323 				/*boundary*/	0,
3324 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3325 				/*highaddr*/	BUS_SPACE_MAXADDR,
3326 				/*filter*/	NULL,
3327 				/*filterarg*/	NULL,
3328 				/*maxsize*/	MAXBSIZE,
3329 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3330 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3331 				/*flags*/	0,
3332 						&acb->dm_segs_dmat) != 0)
3333 	{
3334 		bus_dma_tag_destroy(acb->parent_dmat);
3335 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3336 		return ENOMEM;
3337 	}
3338 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3339 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3340 				/*alignment*/	0x20,
3341 				/*boundary*/	0,
3342 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3343 				/*highaddr*/	BUS_SPACE_MAXADDR,
3344 				/*filter*/	NULL,
3345 				/*filterarg*/	NULL,
3346 				/*maxsize*/	max_coherent_size,
3347 				/*nsegments*/	1,
3348 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3349 				/*flags*/	0,
3350 						&acb->srb_dmat) != 0)
3351 	{
3352 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3353 		bus_dma_tag_destroy(acb->parent_dmat);
3354 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3355 		return ENXIO;
3356 	}
3357 	/* Allocation for our srbs */
3358 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3359 		bus_dma_tag_destroy(acb->srb_dmat);
3360 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3361 		bus_dma_tag_destroy(acb->parent_dmat);
3362 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3363 		return ENXIO;
3364 	}
3365 	/* And permanently map them */
3366 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_freesrb, acb, /*flags*/0)) {
3367 		bus_dma_tag_destroy(acb->srb_dmat);
3368 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3369 		bus_dma_tag_destroy(acb->parent_dmat);
3370 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3371 		return ENXIO;
3372 	}
3373 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3374 	pci_command |= PCIM_CMD_BUSMASTEREN;
3375 	pci_command |= PCIM_CMD_PERRESPEN;
3376 	pci_command |= PCIM_CMD_MWRICEN;
3377 	/* Enable Busmaster/Mem */
3378 	pci_command |= PCIM_CMD_MEMEN;
3379 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3380 	switch(acb->adapter_type) {
3381 	case ACB_ADAPTER_TYPE_A: {
3382 			u_int32_t rid0=PCIR_BAR(0);
3383 			vm_offset_t	mem_base0;
3384 
3385 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3386 			if(acb->sys_res_arcmsr[0] == NULL) {
3387 				arcmsr_free_resource(acb);
3388 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3389 				return ENOMEM;
3390 			}
3391 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3392 				arcmsr_free_resource(acb);
3393 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3394 				return ENXIO;
3395 			}
3396 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3397 			if(mem_base0==0) {
3398 				arcmsr_free_resource(acb);
3399 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3400 				return ENXIO;
3401 			}
3402 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3403 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3404 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3405 		}
3406 		break;
3407 	case ACB_ADAPTER_TYPE_B: {
3408 			struct HBB_MessageUnit *phbbmu;
3409 			struct CommandControlBlock *freesrb;
3410 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3411 			vm_offset_t	mem_base[]={0,0};
3412 			for(i=0; i<2; i++) {
3413 				if(i==0) {
3414 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3415 											0x20400, 0x20400+sizeof(struct HBB_DOORBELL), sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3416 				} else {
3417 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3418 											0x0fa00, 0x0fa00+sizeof(struct HBB_RWBUFFER), sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3419 				}
3420 				if(acb->sys_res_arcmsr[i] == NULL) {
3421 					arcmsr_free_resource(acb);
3422 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3423 					return ENOMEM;
3424 				}
3425 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3426 					arcmsr_free_resource(acb);
3427 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3428 					return ENXIO;
3429 				}
3430 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3431 				if(mem_base[i]==0) {
3432 					arcmsr_free_resource(acb);
3433 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3434 					return ENXIO;
3435 				}
3436 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3437 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3438 			}
3439 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3440 			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3441 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3442 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3443 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3444 		}
3445 		break;
3446 	case ACB_ADAPTER_TYPE_C: {
3447 			u_int32_t rid0=PCIR_BAR(1);
3448 			vm_offset_t	mem_base0;
3449 
3450 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3451 			if(acb->sys_res_arcmsr[0] == NULL) {
3452 				arcmsr_free_resource(acb);
3453 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3454 				return ENOMEM;
3455 			}
3456 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3457 				arcmsr_free_resource(acb);
3458 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3459 				return ENXIO;
3460 			}
3461 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3462 			if(mem_base0==0) {
3463 				arcmsr_free_resource(acb);
3464 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3465 				return ENXIO;
3466 			}
3467 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3468 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3469 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3470 		}
3471 		break;
3472 	}
3473 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3474 		arcmsr_free_resource(acb);
3475 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3476 		return ENXIO;
3477 	}
3478 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3479 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3480 	/*
3481 	********************************************************************
3482 	** init raid volume state
3483 	********************************************************************
3484 	*/
3485 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3486 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3487 			acb->devstate[i][j]=ARECA_RAID_GONE;
3488 		}
3489 	}
3490 	arcmsr_iop_init(acb);
3491 	return(0);
3492 }
3493 /*
3494 ************************************************************************
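** Device attach: initialize the adapter, hook up the interrupt
** handler, register the CAM SIM and bus, create the control device
** and start the periodic device-map poll.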
3495 ************************************************************************
3496 */
3497 static int arcmsr_attach(device_t dev)
3498 {
3499 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3500 	u_int32_t unit=device_get_unit(dev);
3501 	struct ccb_setasync csa;
3502 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3503 	struct resource	*irqres;
3504 	int	rid;
3505 
3506 	if(acb == NULL) {
3507 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3508 		return (ENOMEM);
3509 	}
3510 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3511 	if(arcmsr_initialize(dev)) {
3512 		kprintf("arcmsr%d: initialize failure!\n", unit);
3513 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3514 		return ENXIO;
3515 	}
3516 	/* After setting up the adapter, map our interrupt */
3517 	rid=0;
3518 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
3519 	if(irqres == NULL ||
3520 		bus_setup_intr(dev, irqres, 0, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3521 		arcmsr_free_resource(acb);
3522 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3523 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3524 		return ENXIO;
3525 	}
3526 	acb->irqres=irqres;
3527 	acb->pci_dev=dev;
3528 	acb->pci_unit=unit;
3529 	/*
3530 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3531 	 * the bus and start the request queues.
3532 	 * Create the device queue for our SIM; ARCMSR_MAX_START_JOB
3533 	 * limits the number of simultaneous transactions.
3534 	 */
3535 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3536 	if(devq == NULL) {
3537 		arcmsr_free_resource(acb);
3538 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3539 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3540 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3541 		return ENXIO;
3542 	}
3543 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3544 	if(acb->psim == NULL) {
3545 		arcmsr_free_resource(acb);
3546 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3547 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3548 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3549 		return ENXIO;
3550 	}
3551 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3552 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3553 		arcmsr_free_resource(acb);
3554 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3555 		cam_sim_free(acb->psim);
3556 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3557 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3558 		return ENXIO;
3559 	}
3560 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3561 		arcmsr_free_resource(acb);
3562 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3563 		xpt_bus_deregister(cam_sim_path(acb->psim));
3564 		cam_sim_free(acb->psim);
3565 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3566 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3567 		return ENXIO;
3568 	}
3569 	/*
3570 	****************************************************
3571 	*/
3572 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3573 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3574 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3575 	csa.callback=arcmsr_async;
3576 	csa.callback_arg=acb->psim;
3577 	xpt_action((union ccb *)&csa);
3578 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3579 	/* Create the control device.  */
3580 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3581 
3582 	acb->ioctl_dev->si_drv1=acb;
3583 #if __FreeBSD_version > 500005	/* XXX swildner */
3584 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3585 #endif
3586 	callout_init(&acb->devmap_callout);
3587 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3588 	return 0;
3589 }
3590 /*
3591 ************************************************************************
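** Device probe: match Areca PCI IDs and set a human readable description.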
3592 ************************************************************************
3593 */
3594 static int arcmsr_probe(device_t dev)
3595 {
3596 	u_int32_t id;
3597 	static char buf[256];
3598 	char *type;
3599 	int raid6 = 1;
3600 
3601 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3602 		return (ENXIO);
3603 	}
3604 	switch(id=pci_get_devid(dev)) {
3605 	case PCIDevVenIDARC1110:
3606 	case PCIDevVenIDARC1210:
3607 	case PCIDevVenIDARC1201:
3608 		raid6 = 0;
3609 		/*FALLTHRU*/
3610 	case PCIDevVenIDARC1120:
3611 	case PCIDevVenIDARC1130:
3612 	case PCIDevVenIDARC1160:
3613 	case PCIDevVenIDARC1170:
3614 	case PCIDevVenIDARC1220:
3615 	case PCIDevVenIDARC1230:
3616 	case PCIDevVenIDARC1260:
3617 	case PCIDevVenIDARC1270:
3618 	case PCIDevVenIDARC1280:
3619 		type = "SATA";
3620 		break;
3621 	case PCIDevVenIDARC1212:
3622 	case PCIDevVenIDARC1222:
3623 	case PCIDevVenIDARC1380:
3624 	case PCIDevVenIDARC1381:
3625 	case PCIDevVenIDARC1680:
3626 	case PCIDevVenIDARC1681:
3627 		type = "SAS 3G";
3628 		break;
3629 	case PCIDevVenIDARC1880:
3630 		type = "SAS 6G";
3631 		break;
3632 	default:
3633 		type = "X-TYPE";
3634 		break;
3635 	}
3636 	ksprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : "");
3637 	device_set_desc_copy(dev, buf);
3638 	return 0;
3639 }
3640 /*
3641 ************************************************************************
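** Quiesce the adapter: stop background rebuild, flush the adapter
** cache and abort any outstanding SRBs.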
3642 ************************************************************************
3643 */
3644 static int arcmsr_shutdown(device_t dev)
3645 {
3646 	u_int32_t  i;
3647 	u_int32_t intmask_org;
3648 	struct CommandControlBlock *srb;
3649 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3650 
3651 	/* stop adapter background rebuild */
3652 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3653 	/* disable all outbound interrupt */
3654 	intmask_org=arcmsr_disable_allintr(acb);
3655 	arcmsr_stop_adapter_bgrb(acb);
3656 	arcmsr_flush_adapter_cache(acb);
3657 	/* abort all outstanding command */
3658 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3659 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3660 	if(acb->srboutstandingcount!=0) {
3661 		/*clear and abort all outbound posted Q*/
3662 		arcmsr_done4abort_postqueue(acb);
3663 		/* talk to iop 331 outstanding command aborted*/
3664 		arcmsr_abort_allcmd(acb);
3665 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3666 			srb=acb->psrb_pool[i];
3667 			if(srb->startdone==ARCMSR_SRB_START) {
3668 				srb->startdone=ARCMSR_SRB_ABORTED;
3669 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3670 				arcmsr_srb_complete(srb, 1);
3671 			}
3672 		}
3673 	}
3674 	atomic_set_int(&acb->srboutstandingcount, 0);
3675 	acb->workingsrb_doneindex=0;
3676 	acb->workingsrb_startindex=0;
3677 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3678 	return (0);
3679 }
3680 /*
3681 ************************************************************************
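** Device detach: stop the poll timer, shut the adapter down and
** release interrupt, memory and CAM resources.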
3682 ************************************************************************
3683 */
3684 static int arcmsr_detach(device_t dev)
3685 {
3686 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3687 	int i;
3688 
3689 	callout_stop(&acb->devmap_callout);
3690 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3691 	arcmsr_shutdown(dev);
3692 	arcmsr_free_resource(acb);
3693 	for(i=0; (i<2) && (acb->sys_res_arcmsr[i]!=NULL); i++) { /* bounds check before indexing the resource array */
3694 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3695 	}
3696 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3697 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3698 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3699 	xpt_free_path(acb->ppath);
3700 	xpt_bus_deregister(cam_sim_path(acb->psim));
3701 	cam_sim_free(acb->psim);
3702 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3703 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3704 	return (0);
3705 }
3706