1 /* 2 ***************************************************************************************** 3 ** O.S : FreeBSD 4 ** FILE NAME : arcmsr.c 5 ** BY : Erich Chen, Ching Huang 6 ** Description: SCSI RAID Device Driver for 7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter 8 ** ARCMSR RAID Host adapter 9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set] 10 ****************************************************************************************** 11 ************************************************************************ 12 ** 13 ** Copyright (c) 2004-2010 ARECA Co. Ltd. 14 ** Erich Chen, Taipei Taiwan All rights reserved. 15 ** 16 ** Redistribution and use in source and binary forms, with or without 17 ** modification, are permitted provided that the following conditions 18 ** are met: 19 ** 1. Redistributions of source code must retain the above copyright 20 ** notice, this list of conditions and the following disclaimer. 21 ** 2. Redistributions in binary form must reproduce the above copyright 22 ** notice, this list of conditions and the following disclaimer in the 23 ** documentation and/or other materials provided with the distribution. 24 ** 3. The name of the author may not be used to endorse or promote products 25 ** derived from this software without specific prior written permission. 26 ** 27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT 32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 ************************************************************************** 38 ** History 39 ** 40 ** REV# DATE NAME DESCRIPTION 41 ** 1.00.00.00 3/31/2004 Erich Chen First release 42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error 43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support 44 ** clean unused function 45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling, 46 ** firmware version check 47 ** and firmware update notify for hardware bug fix 48 ** handling if none zero high part physical address 49 ** of srb resource 50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy 51 ** add iop message xfer 52 ** with scsi pass-through command 53 ** add new device id of sas raid adapters 54 ** code fit for SPARC64 & PPC 55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report 56 ** and cause g_vfs_done() read write error 57 ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x 58 ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x 59 ** bus_dmamem_alloc() with BUS_DMA_ZERO 60 ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880 61 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed, 62 ** prevent cam_periph_error removing all LUN devices of one Target id 63 ** for any one LUN device failed 64 ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparion at DV1 step" 65 ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B 66 ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0 67 ****************************************************************************************** 68 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.35 2010/11/13 08:58:36 delphij Exp $ 69 */ 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/malloc.h> 73 #include <sys/kernel.h> 74 #include <sys/bus.h> 75 #include <sys/queue.h> 76 #include <sys/stat.h> 77 #include <sys/devicestat.h> 78 #include <sys/kthread.h> 79 #include <sys/module.h> 80 #include <sys/proc.h> 81 #include <sys/lock.h> 82 #include <sys/sysctl.h> 83 #include <sys/thread2.h> 84 #include <sys/poll.h> 85 #include <sys/device.h> 86 #include <vm/vm.h> 87 #include <vm/vm_param.h> 88 #include <vm/pmap.h> 89 90 #include <machine/atomic.h> 91 #include <sys/conf.h> 92 #include <sys/rman.h> 93 94 #include <bus/cam/cam.h> 95 #include <bus/cam/cam_ccb.h> 96 #include <bus/cam/cam_sim.h> 97 #include <bus/cam/cam_periph.h> 98 #include <bus/cam/cam_xpt_periph.h> 99 #include <bus/cam/cam_xpt_sim.h> 100 #include <bus/cam/cam_debug.h> 101 #include <bus/cam/scsi/scsi_all.h> 102 #include <bus/cam/scsi/scsi_message.h> 103 /* 104 ************************************************************************** 105 ************************************************************************** 106 */ 107 #include <sys/endian.h> 108 #include <bus/pci/pcivar.h> 109 #include <bus/pci/pcireg.h> 110 #define ARCMSR_LOCK_INIT(l, s) lockinit(l, s, 0, LK_CANRECURSE) 111 #define ARCMSR_LOCK_DESTROY(l) lockuninit(l) 112 #define ARCMSR_LOCK_ACQUIRE(l) lockmgr(l, LK_EXCLUSIVE) 113 #define ARCMSR_LOCK_RELEASE(l) lockmgr(l, LK_RELEASE) 114 #define ARCMSR_LOCK_TRY(l) lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT); 115 #define arcmsr_htole32(x) htole32(x) 116 typedef struct lock arcmsr_lock_t; 117 118 #if !defined(CAM_NEW_TRAN_CODE) 119 #define CAM_NEW_TRAN_CODE 1 120 #endif 121 122 #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.19 2010-11-11" 123 #include <dev/raid/arcmsr/arcmsr.h> 124 #define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)) 125 /* 126 ************************************************************************** 127 ************************************************************************** 128 */ 129 #define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r)) 130 #define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d) 131 /* 132 ************************************************************************** 133 ************************************************************************** 134 */ 135 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb); 136 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb); 137 static int arcmsr_probe(device_t dev); 138 static int arcmsr_attach(device_t dev); 139 static int arcmsr_detach(device_t dev); 140 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); 141 static void arcmsr_iop_parking(struct AdapterControlBlock *acb); 142 static int arcmsr_shutdown(device_t dev); 143 static void arcmsr_interrupt(struct AdapterControlBlock *acb); 144 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); 145 static void arcmsr_free_resource(struct AdapterControlBlock *acb); 146 static void arcmsr_bus_reset(struct AdapterControlBlock *acb); 147 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 148 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 149 static void arcmsr_iop_init(struct AdapterControlBlock *acb); 150 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); 151 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb); 152 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); 153 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); 154 static void arcmsr_iop_reset(struct AdapterControlBlock *acb); 155 static void arcmsr_report_sense_info(struct CommandControlBlock *srb); 156 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg); 157 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb); 158 static int arcmsr_resume(device_t dev); 159 static int arcmsr_suspend(device_t dev); 160 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb); 161 static void arcmsr_polling_devmap(void* arg); 162 /* 163 ************************************************************************** 164 ************************************************************************** 165 */ 166 static void UDELAY(u_int32_t us) { DELAY(us); } 167 /* 168 ************************************************************************** 169 ************************************************************************** 170 */ 171 static bus_dmamap_callback_t arcmsr_map_free_srb; 172 static bus_dmamap_callback_t arcmsr_execute_srb; 173 /* 174 ************************************************************************** 175 ************************************************************************** 176 */ 177 static d_open_t arcmsr_open; 178 static d_close_t arcmsr_close; 179 static d_ioctl_t arcmsr_ioctl; 180 181 static device_method_t arcmsr_methods[]={ 182 DEVMETHOD(device_probe, arcmsr_probe), 183 DEVMETHOD(device_attach, arcmsr_attach), 184 DEVMETHOD(device_detach, arcmsr_detach), 185 DEVMETHOD(device_shutdown, arcmsr_shutdown), 186 DEVMETHOD(device_suspend, arcmsr_suspend), 187 DEVMETHOD(device_resume, arcmsr_resume), 188 DEVMETHOD(bus_print_child, bus_generic_print_child), 189 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 190 { 0, 0 } 191 }; 192 193 static driver_t arcmsr_driver={ 194 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) 195 }; 196 197 static devclass_t arcmsr_devclass; 198 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL); 199 MODULE_VERSION(arcmsr, 1); 200 MODULE_DEPEND(arcmsr, pci, 1, 1, 1); 201 MODULE_DEPEND(arcmsr, cam, 1, 1, 1); 202 #ifndef BUS_DMA_COHERENT 203 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ 204 #endif 205 206 static struct dev_ops arcmsr_ops = { 207 { "arcmsr", 0, 0 }, 208 .d_open = arcmsr_open, /* open */ 209 .d_close = arcmsr_close, /* close */ 210 .d_ioctl = arcmsr_ioctl, /* ioctl */ 211 }; 212 213 static int arcmsr_msi_enable = 1; 214 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable); 215 216 217 /* 218 ************************************************************************** 219 ************************************************************************** 220 */ 221 222 static int 223 arcmsr_open(struct dev_open_args *ap) 224 { 225 cdev_t dev = ap->a_head.a_dev; 226 struct AdapterControlBlock *acb=dev->si_drv1; 227 228 if(acb==NULL) { 229 return ENXIO; 230 } 231 return 0; 232 } 233 234 /* 235 ************************************************************************** 236 ************************************************************************** 237 */ 238 239 static int 240 arcmsr_close(struct dev_close_args *ap) 241 { 242 cdev_t dev = ap->a_head.a_dev; 243 struct AdapterControlBlock *acb=dev->si_drv1; 244 245 if(acb==NULL) { 246 return ENXIO; 247 } 248 return 0; 249 } 250 251 /* 252 ************************************************************************** 253 ************************************************************************** 254 */ 255 256 static int 257 arcmsr_ioctl(struct dev_ioctl_args *ap) 258 { 259 cdev_t dev = ap->a_head.a_dev; 260 u_long ioctl_cmd = ap->a_cmd; 261 caddr_t arg = ap->a_data; 262 struct AdapterControlBlock *acb=dev->si_drv1; 263 264 if(acb==NULL) { 265 return ENXIO; 266 } 267 return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); 268 } 269 270 /* 271 ********************************************************************** 272 ********************************************************************** 273 */ 274 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) 275 { 276 u_int32_t intmask_org=0; 277 278 switch (acb->adapter_type) { 279 case ACB_ADAPTER_TYPE_A: { 280 /* disable all outbound interrupt */ 281 intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ 282 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); 283 } 284 break; 285 case ACB_ADAPTER_TYPE_B: { 286 /* disable all outbound interrupt */ 287 intmask_org=CHIP_REG_READ32(HBB_DOORBELL, 288 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ 289 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */ 290 } 291 break; 292 case ACB_ADAPTER_TYPE_C: { 293 /* disable all outbound interrupt */ 294 intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ 295 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); 296 } 297 break; 298 } 299 return(intmask_org); 300 } 301 /* 302 ********************************************************************** 303 ********************************************************************** 304 */ 305 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) 306 { 307 u_int32_t mask; 308 309 switch (acb->adapter_type) { 310 case ACB_ADAPTER_TYPE_A: { 311 /* enable outbound Post Queue, outbound doorbell Interrupt */ 312 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); 313 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); 314 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 315 } 316 break; 317 case ACB_ADAPTER_TYPE_B: { 318 /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ 319 mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); 320 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/ 321 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; 322 } 323 break; 324 case ACB_ADAPTER_TYPE_C: { 325 /* enable outbound Post Queue, outbound doorbell Interrupt */ 326 mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); 327 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); 328 acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f; 329 } 330 break; 331 } 332 return; 333 } 334 /* 335 ********************************************************************** 336 ********************************************************************** 337 */ 338 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) 339 { 340 u_int32_t Index; 341 u_int8_t Retries=0x00; 342 343 do { 344 for(Index=0; Index < 100; Index++) { 345 if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 346 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ 347 return TRUE; 348 } 349 UDELAY(10000); 350 }/*max 1 seconds*/ 351 }while(Retries++ < 20);/*max 20 sec*/ 352 return FALSE; 353 } 354 /* 355 ********************************************************************** 356 ********************************************************************** 357 */ 358 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) 359 { 360 u_int32_t Index; 361 u_int8_t Retries=0x00; 362 363 do { 364 for(Index=0; Index < 100; Index++) { 365 if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 366 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ 367 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 368 return TRUE; 369 } 370 UDELAY(10000); 371 }/*max 1 seconds*/ 372 }while(Retries++ < 20);/*max 20 sec*/ 373 return FALSE; 374 } 375 /* 376 ********************************************************************** 377 ********************************************************************** 378 */ 379 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) 380 { 381 u_int32_t Index; 382 u_int8_t Retries=0x00; 383 384 do { 385 for(Index=0; Index < 100; Index++) { 386 if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 387 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ 388 return TRUE; 389 } 390 UDELAY(10000); 391 }/*max 1 seconds*/ 392 }while(Retries++ < 20);/*max 20 sec*/ 393 return FALSE; 394 } 395 /* 396 ************************************************************************ 397 ************************************************************************ 398 */ 399 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) 400 { 401 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ 402 403 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 404 do { 405 if(arcmsr_hba_wait_msgint_ready(acb)) { 406 break; 407 } else { 408 retry_count--; 409 } 410 }while(retry_count!=0); 411 return; 412 } 413 /* 414 ************************************************************************ 415 ************************************************************************ 416 */ 417 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) 418 { 419 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ 420 421 CHIP_REG_WRITE32(HBB_DOORBELL, 422 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); 423 do { 424 if(arcmsr_hbb_wait_msgint_ready(acb)) { 425 break; 426 } else { 427 retry_count--; 428 } 429 }while(retry_count!=0); 430 return; 431 } 432 /* 433 ************************************************************************ 434 ************************************************************************ 435 */ 436 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) 437 { 438 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ 439 440 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); 441 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 442 do { 443 if(arcmsr_hbc_wait_msgint_ready(acb)) { 444 break; 445 } else { 446 retry_count--; 447 } 448 }while(retry_count!=0); 449 return; 450 } 451 /* 452 ************************************************************************ 453 ************************************************************************ 454 */ 455 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 456 { 457 switch (acb->adapter_type) { 458 case ACB_ADAPTER_TYPE_A: { 459 arcmsr_flush_hba_cache(acb); 460 } 461 break; 462 case ACB_ADAPTER_TYPE_B: { 463 arcmsr_flush_hbb_cache(acb); 464 } 465 break; 466 case ACB_ADAPTER_TYPE_C: { 467 arcmsr_flush_hbc_cache(acb); 468 } 469 break; 470 } 471 return; 472 } 473 /* 474 ******************************************************************************* 475 ******************************************************************************* 476 */ 477 static int arcmsr_suspend(device_t dev) 478 { 479 struct AdapterControlBlock *acb = device_get_softc(dev); 480 481 /* flush controller */ 482 arcmsr_iop_parking(acb); 483 /* disable all outbound interrupt */ 484 arcmsr_disable_allintr(acb); 485 return(0); 486 } 487 /* 488 ******************************************************************************* 489 ******************************************************************************* 490 */ 491 static int arcmsr_resume(device_t dev) 492 { 493 struct AdapterControlBlock *acb = device_get_softc(dev); 494 495 arcmsr_iop_init(acb); 496 return(0); 497 } 498 /* 499 ********************************************************************************* 500 ********************************************************************************* 501 */ 502 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) 503 { 504 struct AdapterControlBlock *acb; 505 u_int8_t target_id, target_lun; 506 struct cam_sim * sim; 507 508 sim=(struct cam_sim *) cb_arg; 509 acb =(struct AdapterControlBlock *) cam_sim_softc(sim); 510 switch (code) { 511 case AC_LOST_DEVICE: 512 target_id=xpt_path_target_id(path); 513 target_lun=xpt_path_lun_id(path); 514 if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { 515 break; 516 } 517 kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); 518 break; 519 default: 520 break; 521 } 522 } 523 /* 524 ********************************************************************** 525 ********************************************************************** 526 */ 527 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) 528 { 529 struct AdapterControlBlock *acb=srb->acb; 530 union ccb * pccb=srb->pccb; 531 532 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 533 bus_dmasync_op_t op; 534 535 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 536 op = BUS_DMASYNC_POSTREAD; 537 } else { 538 op = BUS_DMASYNC_POSTWRITE; 539 } 540 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 541 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 542 } 543 if(stand_flag==1) { 544 atomic_subtract_int(&acb->srboutstandingcount, 1); 545 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( 546 acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) { 547 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; 548 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 549 } 550 } 551 srb->startdone=ARCMSR_SRB_DONE; 552 srb->srb_flags=0; 553 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; 554 acb->workingsrb_doneindex++; 555 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; 556 xpt_done(pccb); 557 return; 558 } 559 /* 560 ********************************************************************** 561 ********************************************************************** 562 */ 563 static void arcmsr_report_sense_info(struct CommandControlBlock *srb) 564 { 565 union ccb * pccb=srb->pccb; 566 567 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 568 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 569 if(&pccb->csio.sense_data) { 570 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); 571 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, 572 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 573 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ 574 pccb->ccb_h.status |= CAM_AUTOSNS_VALID; 575 } 576 return; 577 } 578 /* 579 ********************************************************************* 580 ********************************************************************* 581 */ 582 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 583 { 584 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 585 if(!arcmsr_hba_wait_msgint_ready(acb)) { 586 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); 587 } 588 return; 589 } 590 /* 591 ********************************************************************* 592 ********************************************************************* 593 */ 594 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 595 { 596 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); 597 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 598 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); 599 } 600 return; 601 } 602 /* 603 ********************************************************************* 604 ********************************************************************* 605 */ 606 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) 607 { 608 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); 609 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 610 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 611 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); 612 } 613 return; 614 } 615 /* 616 ********************************************************************* 617 ********************************************************************* 618 */ 619 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 620 { 621 switch (acb->adapter_type) { 622 case ACB_ADAPTER_TYPE_A: { 623 arcmsr_abort_hba_allcmd(acb); 624 } 625 break; 626 case ACB_ADAPTER_TYPE_B: { 627 arcmsr_abort_hbb_allcmd(acb); 628 } 629 break; 630 case ACB_ADAPTER_TYPE_C: { 631 arcmsr_abort_hbc_allcmd(acb); 632 } 633 break; 634 } 635 return; 636 } 637 /* 638 ************************************************************************** 639 ************************************************************************** 640 */ 641 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) 642 { 643 int target, lun; 644 645 target=srb->pccb->ccb_h.target_id; 646 lun=srb->pccb->ccb_h.target_lun; 647 if(error == FALSE) { 648 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 649 acb->devstate[target][lun]=ARECA_RAID_GOOD; 650 } 651 srb->pccb->ccb_h.status |= CAM_REQ_CMP; 652 arcmsr_srb_complete(srb, 1); 653 } else { 654 switch(srb->arcmsr_cdb.DeviceStatus) { 655 case ARCMSR_DEV_SELECT_TIMEOUT: { 656 if(acb->devstate[target][lun]==ARECA_RAID_GOOD) { 657 kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); 658 } 659 acb->devstate[target][lun]=ARECA_RAID_GONE; 660 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 661 arcmsr_srb_complete(srb, 1); 662 } 663 break; 664 case ARCMSR_DEV_ABORTED: 665 case ARCMSR_DEV_INIT_FAIL: { 666 acb->devstate[target][lun]=ARECA_RAID_GONE; 667 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 668 arcmsr_srb_complete(srb, 1); 669 } 670 break; 671 case SCSISTAT_CHECK_CONDITION: { 672 acb->devstate[target][lun]=ARECA_RAID_GOOD; 673 arcmsr_report_sense_info(srb); 674 arcmsr_srb_complete(srb, 1); 675 } 676 break; 677 default: 678 kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknow DeviceStatus=0x%x \n" 679 , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus); 680 acb->devstate[target][lun]=ARECA_RAID_GONE; 681 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; 682 /*unknow error or crc error just for retry*/ 683 arcmsr_srb_complete(srb, 1); 684 break; 685 } 686 } 687 return; 688 } 689 /* 690 ************************************************************************** 691 ************************************************************************** 692 */ 693 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) 694 { 695 struct CommandControlBlock *srb; 696 697 /* check if command done with no error*/ 698 switch (acb->adapter_type) { 699 case ACB_ADAPTER_TYPE_C: 700 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/ 701 break; 702 case ACB_ADAPTER_TYPE_A: 703 case ACB_ADAPTER_TYPE_B: 704 default: 705 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ 706 break; 707 } 708 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 709 if(srb->startdone==ARCMSR_SRB_ABORTED) { 710 kprintf("arcmsr%d: srb='%p' isr got aborted command \n", acb->pci_unit, srb); 711 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 712 arcmsr_srb_complete(srb, 1); 713 return; 714 } 715 kprintf("arcmsr%d: isr get an illegal srb command done" 716 "acb='%p' srb='%p' srbacb='%p' startdone=0x%xsrboutstandingcount=%d \n", 717 acb->pci_unit, acb, srb, srb->acb,srb->startdone, acb->srboutstandingcount); 718 return; 719 } 720 arcmsr_report_srb_state(acb, srb, error); 721 return; 722 } 723 /* 724 ********************************************************************** 725 ********************************************************************** 726 */ 727 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) 728 { 729 int i=0; 730 u_int32_t flag_srb; 731 u_int16_t error; 732 733 switch (acb->adapter_type) { 734 case ACB_ADAPTER_TYPE_A: { 735 u_int32_t outbound_intstatus; 736 737 /*clear and abort all outbound posted Q*/ 738 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; 739 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 740 while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 741 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 742 arcmsr_drain_donequeue(acb, flag_srb, error); 743 } 744 } 745 break; 746 case ACB_ADAPTER_TYPE_B: { 747 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 748 749 /*clear all outbound posted Q*/ 750 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ 751 for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { 752 if((flag_srb=phbbmu->done_qbuffer[i])!=0) { 753 phbbmu->done_qbuffer[i]=0; 754 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 755 arcmsr_drain_donequeue(acb, flag_srb, error); 756 } 757 phbbmu->post_qbuffer[i]=0; 758 }/*drain reply FIFO*/ 759 phbbmu->doneq_index=0; 760 phbbmu->postq_index=0; 761 } 762 break; 763 case ACB_ADAPTER_TYPE_C: { 764 765 while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 766 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 767 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 768 arcmsr_drain_donequeue(acb, flag_srb, error); 769 } 770 } 771 break; 772 } 773 return; 774 } 775 /* 776 **************************************************************************** 777 **************************************************************************** 778 */ 779 static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 780 { 781 struct CommandControlBlock *srb; 782 u_int32_t intmask_org; 783 u_int32_t i=0; 784 785 if(acb->srboutstandingcount>0) { 786 /* disable all outbound interrupt */ 787 intmask_org=arcmsr_disable_allintr(acb); 788 /*clear and abort all outbound posted Q*/ 789 arcmsr_done4abort_postqueue(acb); 790 /* talk to iop 331 outstanding command aborted*/ 791 arcmsr_abort_allcmd(acb); 792 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 793 srb=acb->psrb_pool[i]; 794 if(srb->startdone==ARCMSR_SRB_START) { 795 srb->startdone=ARCMSR_SRB_ABORTED; 796 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 797 arcmsr_srb_complete(srb, 1); 798 } 799 } 800 /* enable all outbound interrupt */ 801 arcmsr_enable_allintr(acb, intmask_org); 802 } 803 atomic_set_int(&acb->srboutstandingcount, 0); 804 acb->workingsrb_doneindex=0; 805 acb->workingsrb_startindex=0; 806 return; 807 } 808 /* 809 ********************************************************************** 810 ********************************************************************** 811 */ 812 static void arcmsr_build_srb(struct CommandControlBlock *srb, 813 bus_dma_segment_t *dm_segs, u_int32_t nseg) 814 { 815 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb; 816 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u; 817 u_int32_t address_lo, address_hi; 818 union ccb * pccb=srb->pccb; 819 struct ccb_scsiio * pcsio= &pccb->csio; 820 u_int32_t arccdbsize=0x30; 821 822 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 823 arcmsr_cdb->Bus=0; 824 arcmsr_cdb->TargetID=pccb->ccb_h.target_id; 825 arcmsr_cdb->LUN=pccb->ccb_h.target_lun; 826 arcmsr_cdb->Function=1; 827 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len; 828 arcmsr_cdb->Context=0; 829 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len); 830 if(nseg != 0) { 831 struct AdapterControlBlock *acb=srb->acb; 832 bus_dmasync_op_t op; 833 u_int32_t length, i, cdb_sgcount=0; 834 835 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 836 op=BUS_DMASYNC_PREREAD; 837 } else { 838 op=BUS_DMASYNC_PREWRITE; 839 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE; 840 srb->srb_flags|=SRB_FLAG_WRITE; 841 } 842 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); 843 for(i=0;i<nseg;i++) { 844 /* Get the physical address of the current data pointer */ 845 length=arcmsr_htole32(dm_segs[i].ds_len); 846 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); 847 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); 848 if(address_hi==0) { 849 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge; 850 pdma_sg->address=address_lo; 851 pdma_sg->length=length; 852 psge += sizeof(struct SG32ENTRY); 853 arccdbsize += sizeof(struct SG32ENTRY); 854 } else { 855 u_int32_t sg64s_size=0, tmplength=length; 856 857 while(1) { 858 u_int64_t span4G, length0; 859 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge; 860 861 span4G=(u_int64_t)address_lo + tmplength; 862 pdma_sg->addresshigh=address_hi; 863 pdma_sg->address=address_lo; 864 if(span4G > 0x100000000) { 865 /*see if cross 4G boundary*/ 866 length0=0x100000000-address_lo; 867 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR; 868 address_hi=address_hi+1; 869 address_lo=0; 870 tmplength=tmplength-(u_int32_t)length0; 871 sg64s_size += sizeof(struct SG64ENTRY); 872 psge += sizeof(struct SG64ENTRY); 873 cdb_sgcount++; 874 } else { 875 pdma_sg->length=tmplength|IS_SG64_ADDR; 876 sg64s_size += sizeof(struct SG64ENTRY); 877 psge += sizeof(struct SG64ENTRY); 878 break; 879 } 880 } 881 arccdbsize += sg64s_size; 882 } 883 cdb_sgcount++; 884 } 885 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount; 886 arcmsr_cdb->DataLength=pcsio->dxfer_len; 887 if( arccdbsize > 256) { 888 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE; 889 } 890 } else { 891 arcmsr_cdb->DataLength = 0; 892 } 893 srb->arc_cdb_size=arccdbsize; 894 return; 895 } 896 /* 897 ************************************************************************** 898 ************************************************************************** 899 */ 900 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) 901 { 902 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr; 903 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb; 904 905 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); 906 atomic_add_int(&acb->srboutstandingcount, 1); 907 srb->startdone=ARCMSR_SRB_START; 908 909 switch (acb->adapter_type) { 910 case ACB_ADAPTER_TYPE_A: { 911 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 912 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); 913 } else { 914 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr); 915 } 916 } 917 break; 918 case ACB_ADAPTER_TYPE_B: { 919 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 920 int ending_index, index; 921 922 index=phbbmu->postq_index; 923 ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE); 924 phbbmu->post_qbuffer[ending_index]=0; 925 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 926 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE; 927 } else { 928 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr; 929 } 930 index++; 931 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ 932 phbbmu->postq_index=index; 933 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED); 934 } 935 break; 936 case ACB_ADAPTER_TYPE_C: 937 { 938 u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32; 939 940 arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size; 941 ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1); 942 cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; 943 if(cdb_phyaddr_hi32) 944 { 945 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32); 946 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); 947 } 948 else 949 { 950 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); 951 } 952 } 953 break; 954 } 955 return; 956 } 957 /* 958 ************************************************************************ 959 ************************************************************************ 960 */ 961 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb) 962 { 963 struct QBUFFER *qbuffer=NULL; 964 965 switch (acb->adapter_type) { 966 case ACB_ADAPTER_TYPE_A: { 967 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu; 968 969 qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer; 970 } 971 break; 972 case ACB_ADAPTER_TYPE_B: { 973 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 974 975 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer; 976 } 977 break; 978 case ACB_ADAPTER_TYPE_C: { 979 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu; 980 981 qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer; 982 } 983 break; 984 } 985 return(qbuffer); 986 } 987 /* 988 ************************************************************************ 989 ************************************************************************ 990 */ 991 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb) 992 { 993 struct QBUFFER *qbuffer=NULL; 994 995 switch (acb->adapter_type) { 996 case ACB_ADAPTER_TYPE_A: { 997 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu; 998 999 qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer; 1000 } 1001 break; 1002 case ACB_ADAPTER_TYPE_B: { 1003 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 1004 1005 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer; 1006 } 1007 break; 1008 case ACB_ADAPTER_TYPE_C: { 1009 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu; 1010 1011 qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer; 1012 } 1013 break; 1014 } 1015 return(qbuffer); 1016 } 1017 /* 1018 ************************************************************************** 1019 ************************************************************************** 1020 */ 1021 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1022 { 1023 switch (acb->adapter_type) { 1024 case ACB_ADAPTER_TYPE_A: { 1025 /* let IOP know data has been read */ 1026 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 1027 } 1028 break; 1029 case ACB_ADAPTER_TYPE_B: { 1030 /* let IOP know data has been read */ 1031 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); 1032 } 1033 break; 1034 case ACB_ADAPTER_TYPE_C: { 1035 /* let IOP know data has been read */ 1036 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 1037 } 1038 } 1039 return; 1040 } 1041 /* 1042 ************************************************************************** 1043 ************************************************************************** 1044 */ 1045 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) 1046 { 1047 switch (acb->adapter_type) { 1048 case ACB_ADAPTER_TYPE_A: { 1049 /* 1050 ** push inbound doorbell tell iop, driver data write ok 1051 ** and wait reply on next hwinterrupt for next Qbuffer post 1052 */ 1053 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); 1054 } 1055 break; 1056 case ACB_ADAPTER_TYPE_B: { 1057 /* 1058 ** push inbound doorbell tell iop, driver data write ok 1059 ** and wait reply on next hwinterrupt for next Qbuffer post 1060 */ 1061 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK); 1062 } 1063 break; 1064 case ACB_ADAPTER_TYPE_C: { 1065 /* 1066 ** push inbound doorbell tell iop, driver data write ok 1067 ** and wait reply on next hwinterrupt for next Qbuffer post 1068 */ 1069 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK); 1070 } 1071 break; 1072 } 1073 } 1074 /* 1075 ********************************************************************** 1076 ********************************************************************** 1077 */ 1078 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) 1079 { 1080 u_int8_t *pQbuffer; 1081 struct QBUFFER *pwbuffer; 1082 u_int8_t * iop_data; 1083 int32_t allxfer_len=0; 1084 1085 pwbuffer=arcmsr_get_iop_wqbuffer(acb); 1086 iop_data=(u_int8_t *)pwbuffer->data; 1087 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { 1088 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); 1089 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) 1090 && (allxfer_len<124)) { 1091 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; 1092 memcpy(iop_data, pQbuffer, 1); 1093 acb->wqbuf_firstindex++; 1094 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ 1095 iop_data++; 1096 allxfer_len++; 1097 } 1098 pwbuffer->data_len=allxfer_len; 1099 /* 1100 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post 1101 */ 1102 arcmsr_iop_message_wrote(acb); 1103 } 1104 return; 1105 } 1106 /* 1107 ************************************************************************ 1108 ************************************************************************ 1109 */ 1110 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) 1111 { 1112 acb->acb_flags &=~ACB_F_MSG_START_BGRB; 1113 CHIP_REG_WRITE32(HBA_MessageUnit, 1114 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1115 if(!arcmsr_hba_wait_msgint_ready(acb)) { 1116 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1117 , acb->pci_unit); 1118 } 1119 return; 1120 } 1121 /* 1122 ************************************************************************ 1123 ************************************************************************ 1124 */ 1125 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) 1126 { 1127 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1128 CHIP_REG_WRITE32(HBB_DOORBELL, 1129 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); 1130 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 1131 kprintf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1132 , acb->pci_unit); 1133 } 1134 return; 1135 } 1136 /* 1137 ************************************************************************ 1138 ************************************************************************ 1139 */ 1140 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb) 1141 { 1142 acb->acb_flags &=~ACB_F_MSG_START_BGRB; 1143 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); 1144 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 1145 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 1146 kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit); 1147 } 1148 return; 1149 } 1150 /* 1151 ************************************************************************ 1152 ************************************************************************ 1153 */ 1154 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1155 { 1156 switch (acb->adapter_type) { 1157 case ACB_ADAPTER_TYPE_A: { 1158 arcmsr_stop_hba_bgrb(acb); 1159 } 1160 break; 1161 case ACB_ADAPTER_TYPE_B: { 1162 arcmsr_stop_hbb_bgrb(acb); 1163 } 1164 break; 1165 case ACB_ADAPTER_TYPE_C: { 1166 arcmsr_stop_hbc_bgrb(acb); 1167 } 1168 break; 1169 } 1170 return; 1171 } 1172 /* 1173 ************************************************************************ 1174 ************************************************************************ 1175 */ 1176 static void arcmsr_poll(struct cam_sim * psim) 1177 { 1178 struct AdapterControlBlock *acb; 1179 1180 acb = (struct AdapterControlBlock *)cam_sim_softc(psim); 1181 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1182 arcmsr_interrupt(acb); 1183 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1184 return; 1185 } 1186 /* 1187 ************************************************************************** 1188 ************************************************************************** 1189 */ 1190 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 1191 { 1192 struct QBUFFER *prbuffer; 1193 u_int8_t *pQbuffer; 1194 u_int8_t *iop_data; 1195 int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 1196 1197 /*check this iop data if overflow my rqbuffer*/ 1198 rqbuf_lastindex=acb->rqbuf_lastindex; 1199 rqbuf_firstindex=acb->rqbuf_firstindex; 1200 prbuffer=arcmsr_get_iop_rqbuffer(acb); 1201 iop_data=(u_int8_t *)prbuffer->data; 1202 iop_len=prbuffer->data_len; 1203 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 1204 if(my_empty_len>=iop_len) { 1205 while(iop_len > 0) { 1206 pQbuffer=&acb->rqbuffer[rqbuf_lastindex]; 1207 memcpy(pQbuffer, iop_data, 1); 1208 rqbuf_lastindex++; 1209 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */ 1210 iop_data++; 1211 iop_len--; 1212 } 1213 acb->rqbuf_lastindex=rqbuf_lastindex; 1214 arcmsr_iop_message_read(acb); 1215 /*signature, let IOP know data has been read */ 1216 } else { 1217 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; 1218 } 1219 return; 1220 } 1221 /* 1222 ************************************************************************** 1223 ************************************************************************** 1224 */ 1225 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 1226 { 1227 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; 1228 /* 1229 ***************************************************************** 1230 ** check if there are any mail packages from user space program 1231 ** in my post bag, now is the time to send them into Areca's firmware 1232 ***************************************************************** 1233 */ 1234 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { 1235 u_int8_t *pQbuffer; 1236 struct QBUFFER *pwbuffer; 1237 u_int8_t *iop_data; 1238 int allxfer_len=0; 1239 1240 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); 1241 pwbuffer=arcmsr_get_iop_wqbuffer(acb); 1242 iop_data=(u_int8_t *)pwbuffer->data; 1243 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) 1244 && (allxfer_len<124)) { 1245 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; 1246 memcpy(iop_data, pQbuffer, 1); 1247 acb->wqbuf_firstindex++; 1248 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ 1249 iop_data++; 1250 allxfer_len++; 1251 } 1252 pwbuffer->data_len=allxfer_len; 1253 /* 1254 ** push inbound doorbell tell iop driver data write ok 1255 ** and wait reply on next hwinterrupt for next Qbuffer post 1256 */ 1257 arcmsr_iop_message_wrote(acb); 1258 } 1259 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { 1260 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 1261 } 1262 return; 1263 } 1264 1265 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) 1266 { 1267 /* 1268 if (ccb->ccb_h.status != CAM_REQ_CMP) 1269 kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status); 1270 else 1271 kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); 1272 */ 1273 xpt_free_path(ccb->ccb_h.path); 1274 } 1275 1276 static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) 1277 { 1278 struct cam_path *path; 1279 union ccb ccb; 1280 1281 if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) 1282 return; 1283 /* kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ 1284 bzero(&ccb, sizeof(union ccb)); 1285 xpt_setup_ccb(&ccb.ccb_h, path, 5); 1286 ccb.ccb_h.func_code = XPT_SCAN_LUN; 1287 ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb; 1288 ccb.crcn.flags = CAM_FLAG_NONE; 1289 xpt_action(&ccb); 1290 return; 1291 } 1292 1293 1294 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun) 1295 { 1296 struct CommandControlBlock *srb; 1297 u_int32_t intmask_org; 1298 int i; 1299 1300 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1301 /* disable all outbound interrupts */ 1302 intmask_org = arcmsr_disable_allintr(acb); 1303 for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) 1304 { 1305 srb = acb->psrb_pool[i]; 1306 if (srb->startdone == ARCMSR_SRB_START) 1307 { 1308 if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) 1309 { 1310 srb->startdone = ARCMSR_SRB_ABORTED; 1311 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 1312 arcmsr_srb_complete(srb, 1); 1313 } 1314 } 1315 } 1316 /* enable outbound Post Queue, outbound doorbell Interrupt */ 1317 arcmsr_enable_allintr(acb, intmask_org); 1318 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1319 } 1320 1321 1322 /* 1323 ************************************************************************** 1324 ************************************************************************** 1325 */ 1326 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { 1327 u_int32_t devicemap; 1328 u_int32_t target, lun; 1329 u_int32_t deviceMapCurrent[4]={0}; 1330 u_int8_t *pDevMap; 1331 1332 switch (acb->adapter_type) { 1333 case ACB_ADAPTER_TYPE_A: 1334 devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 1335 for (target= 0; target < 4; target++) 1336 { 1337 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); 1338 devicemap += 4; 1339 } 1340 break; 1341 1342 case ACB_ADAPTER_TYPE_B: 1343 devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 1344 for (target= 0; target < 4; target++) 1345 { 1346 deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); 1347 devicemap += 4; 1348 } 1349 break; 1350 1351 case ACB_ADAPTER_TYPE_C: 1352 devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 1353 for (target= 0; target < 4; target++) 1354 { 1355 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); 1356 devicemap += 4; 1357 } 1358 break; 1359 } 1360 if(acb->acb_flags & ACB_F_BUS_HANG_ON) 1361 { 1362 acb->acb_flags &= ~ACB_F_BUS_HANG_ON; 1363 } 1364 /* 1365 ** adapter posted CONFIG message 1366 ** copy the new map, note if there are differences with the current map 1367 */ 1368 pDevMap = (u_int8_t *)&deviceMapCurrent[0]; 1369 for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++) 1370 { 1371 if (*pDevMap != acb->device_map[target]) 1372 { 1373 u_int8_t difference, bit_check; 1374 1375 difference= *pDevMap ^ acb->device_map[target]; 1376 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) 1377 { 1378 bit_check=(1 << lun); /*check bit from 0....31*/ 1379 if(difference & bit_check) 1380 { 1381 if(acb->device_map[target] & bit_check) 1382 {/* unit departed */ 1383 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); 1384 arcmsr_abort_dr_ccbs(acb, target, lun); 1385 arcmsr_rescan_lun(acb, target, lun); 1386 acb->devstate[target][lun] = ARECA_RAID_GONE; 1387 } 1388 else 1389 {/* unit arrived */ 1390 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, ARRIVING!!!\n",target,lun); 1391 arcmsr_rescan_lun(acb, target, lun); 1392 acb->devstate[target][lun] = ARECA_RAID_GOOD; 1393 } 1394 } 1395 } 1396 /* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ 1397 acb->device_map[target]= *pDevMap; 1398 } 1399 pDevMap++; 1400 } 1401 } 1402 /* 1403 ************************************************************************** 1404 ************************************************************************** 1405 */ 1406 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { 1407 u_int32_t outbound_message; 1408 1409 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); 1410 outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); 1411 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) 1412 arcmsr_dr_handle( acb ); 1413 } 1414 /* 1415 ************************************************************************** 1416 ************************************************************************** 1417 */ 1418 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { 1419 u_int32_t outbound_message; 1420 1421 /* clear interrupts */ 1422 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); 1423 outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); 1424 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) 1425 arcmsr_dr_handle( acb ); 1426 } 1427 /* 1428 ************************************************************************** 1429 ************************************************************************** 1430 */ 1431 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { 1432 u_int32_t outbound_message; 1433 1434 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); 1435 outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); 1436 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) 1437 arcmsr_dr_handle( acb ); 1438 } 1439 /* 1440 ************************************************************************** 1441 ************************************************************************** 1442 */ 1443 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) 1444 { 1445 u_int32_t outbound_doorbell; 1446 1447 /* 1448 ******************************************************************* 1449 ** Maybe here we need to check wrqbuffer_lock is lock or not 1450 ** DOORBELL: din! don! 1451 ** check if there are any mail need to pack from firmware 1452 ******************************************************************* 1453 */ 1454 outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit, 1455 0, outbound_doorbell); 1456 CHIP_REG_WRITE32(HBA_MessageUnit, 1457 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */ 1458 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 1459 arcmsr_iop2drv_data_wrote_handle(acb); 1460 } 1461 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 1462 arcmsr_iop2drv_data_read_handle(acb); 1463 } 1464 return; 1465 } 1466 /* 1467 ************************************************************************** 1468 ************************************************************************** 1469 */ 1470 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) 1471 { 1472 u_int32_t outbound_doorbell; 1473 1474 /* 1475 ******************************************************************* 1476 ** Maybe here we need to check wrqbuffer_lock is lock or not 1477 ** DOORBELL: din! don! 1478 ** check if there are any mail need to pack from firmware 1479 ******************************************************************* 1480 */ 1481 outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); 1482 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */ 1483 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { 1484 arcmsr_iop2drv_data_wrote_handle(acb); 1485 } 1486 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { 1487 arcmsr_iop2drv_data_read_handle(acb); 1488 } 1489 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 1490 arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ 1491 } 1492 return; 1493 } 1494 /* 1495 ************************************************************************** 1496 ************************************************************************** 1497 */ 1498 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) 1499 { 1500 u_int32_t flag_srb; 1501 u_int16_t error; 1502 1503 /* 1504 ***************************************************************************** 1505 ** areca cdb command done 1506 ***************************************************************************** 1507 */ 1508 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 1509 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1510 while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 1511 0, outbound_queueport)) != 0xFFFFFFFF) { 1512 /* check if command done with no error*/ 1513 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 1514 arcmsr_drain_donequeue(acb, flag_srb, error); 1515 } /*drain reply FIFO*/ 1516 return; 1517 } 1518 /* 1519 ************************************************************************** 1520 ************************************************************************** 1521 */ 1522 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) 1523 { 1524 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 1525 u_int32_t flag_srb; 1526 int index; 1527 u_int16_t error; 1528 1529 /* 1530 ***************************************************************************** 1531 ** areca cdb command done 1532 ***************************************************************************** 1533 */ 1534 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, 1535 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1536 index=phbbmu->doneq_index; 1537 while((flag_srb=phbbmu->done_qbuffer[index]) != 0) { 1538 phbbmu->done_qbuffer[index]=0; 1539 index++; 1540 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ 1541 phbbmu->doneq_index=index; 1542 /* check if command done with no error*/ 1543 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 1544 arcmsr_drain_donequeue(acb, flag_srb, error); 1545 } /*drain reply FIFO*/ 1546 return; 1547 } 1548 /* 1549 ************************************************************************** 1550 ************************************************************************** 1551 */ 1552 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) 1553 { 1554 u_int32_t flag_srb,throttling=0; 1555 u_int16_t error; 1556 1557 /* 1558 ***************************************************************************** 1559 ** areca cdb command done 1560 ***************************************************************************** 1561 */ 1562 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1563 1564 while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { 1565 1566 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 1567 /* check if command done with no error*/ 1568 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 1569 arcmsr_drain_donequeue(acb, flag_srb, error); 1570 if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) { 1571 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); 1572 break; 1573 } 1574 throttling++; 1575 } /*drain reply FIFO*/ 1576 return; 1577 } 1578 /* 1579 ********************************************************************** 1580 ********************************************************************** 1581 */ 1582 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) 1583 { 1584 u_int32_t outbound_intstatus; 1585 /* 1586 ********************************************* 1587 ** check outbound intstatus 1588 ********************************************* 1589 */ 1590 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; 1591 if(!outbound_intstatus) { 1592 /*it must be share irq*/ 1593 return; 1594 } 1595 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 1596 /* MU doorbell interrupts*/ 1597 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 1598 arcmsr_hba_doorbell_isr(acb); 1599 } 1600 /* MU post queue interrupts*/ 1601 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1602 arcmsr_hba_postqueue_isr(acb); 1603 } 1604 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 1605 arcmsr_hba_message_isr(acb); 1606 } 1607 return; 1608 } 1609 /* 1610 ********************************************************************** 1611 ********************************************************************** 1612 */ 1613 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) 1614 { 1615 u_int32_t outbound_doorbell; 1616 /* 1617 ********************************************* 1618 ** check outbound intstatus 1619 ********************************************* 1620 */ 1621 outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable; 1622 if(!outbound_doorbell) { 1623 /*it must be share irq*/ 1624 return; 1625 } 1626 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ 1627 CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell); 1628 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 1629 /* MU ioctl transfer doorbell interrupts*/ 1630 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1631 arcmsr_iop2drv_data_wrote_handle(acb); 1632 } 1633 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { 1634 arcmsr_iop2drv_data_read_handle(acb); 1635 } 1636 /* MU post queue interrupts*/ 1637 if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1638 arcmsr_hbb_postqueue_isr(acb); 1639 } 1640 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 1641 arcmsr_hbb_message_isr(acb); 1642 } 1643 return; 1644 } 1645 /* 1646 ********************************************************************** 1647 ********************************************************************** 1648 */ 1649 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) 1650 { 1651 u_int32_t host_interrupt_status; 1652 /* 1653 ********************************************* 1654 ** check outbound intstatus 1655 ********************************************* 1656 */ 1657 host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); 1658 if(!host_interrupt_status) { 1659 /*it must be share irq*/ 1660 return; 1661 } 1662 /* MU doorbell interrupts*/ 1663 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { 1664 arcmsr_hbc_doorbell_isr(acb); 1665 } 1666 /* MU post queue interrupts*/ 1667 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { 1668 arcmsr_hbc_postqueue_isr(acb); 1669 } 1670 return; 1671 } 1672 /* 1673 ****************************************************************************** 1674 ****************************************************************************** 1675 */ 1676 static void arcmsr_interrupt(struct AdapterControlBlock *acb) 1677 { 1678 switch (acb->adapter_type) { 1679 case ACB_ADAPTER_TYPE_A: 1680 arcmsr_handle_hba_isr(acb); 1681 break; 1682 case ACB_ADAPTER_TYPE_B: 1683 arcmsr_handle_hbb_isr(acb); 1684 break; 1685 case ACB_ADAPTER_TYPE_C: 1686 arcmsr_handle_hbc_isr(acb); 1687 break; 1688 default: 1689 kprintf("arcmsr%d: interrupt service," 1690 " unknow adapter type =%d\n", acb->pci_unit, acb->adapter_type); 1691 break; 1692 } 1693 return; 1694 } 1695 /* 1696 ********************************************************************** 1697 ********************************************************************** 1698 */ 1699 static void arcmsr_intr_handler(void *arg) 1700 { 1701 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; 1702 1703 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1704 arcmsr_interrupt(acb); 1705 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1706 } 1707 /* 1708 ****************************************************************************** 1709 ****************************************************************************** 1710 */ 1711 static void arcmsr_polling_devmap(void* arg) 1712 { 1713 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; 1714 switch (acb->adapter_type) { 1715 case ACB_ADAPTER_TYPE_A: 1716 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 1717 break; 1718 1719 case ACB_ADAPTER_TYPE_B: 1720 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); 1721 break; 1722 1723 case ACB_ADAPTER_TYPE_C: 1724 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 1725 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 1726 break; 1727 } 1728 1729 if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) 1730 { 1731 callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ 1732 } 1733 } 1734 1735 /* 1736 ******************************************************************************* 1737 ** 1738 ******************************************************************************* 1739 */ 1740 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 1741 { 1742 u_int32_t intmask_org; 1743 1744 if(acb!=NULL) { 1745 /* stop adapter background rebuild */ 1746 if(acb->acb_flags & ACB_F_MSG_START_BGRB) { 1747 intmask_org = arcmsr_disable_allintr(acb); 1748 arcmsr_stop_adapter_bgrb(acb); 1749 arcmsr_flush_adapter_cache(acb); 1750 arcmsr_enable_allintr(acb, intmask_org); 1751 } 1752 } 1753 } 1754 /* 1755 *********************************************************************** 1756 ** 1757 ************************************************************************ 1758 */ 1759 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) 1760 { 1761 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1762 u_int32_t retvalue=EINVAL; 1763 1764 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; 1765 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { 1766 return retvalue; 1767 } 1768 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1769 switch(ioctl_cmd) { 1770 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1771 u_int8_t * pQbuffer; 1772 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1773 u_int32_t allxfer_len=0; 1774 1775 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) 1776 && (allxfer_len<1031)) { 1777 /*copy READ QBUFFER to srb*/ 1778 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; 1779 memcpy(ptmpQbuffer, pQbuffer, 1); 1780 acb->rqbuf_firstindex++; 1781 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1782 /*if last index number set it to 0 */ 1783 ptmpQbuffer++; 1784 allxfer_len++; 1785 } 1786 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1787 struct QBUFFER * prbuffer; 1788 u_int8_t * iop_data; 1789 u_int32_t iop_len; 1790 1791 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1792 prbuffer=arcmsr_get_iop_rqbuffer(acb); 1793 iop_data=(u_int8_t *)prbuffer->data; 1794 iop_len=(u_int32_t)prbuffer->data_len; 1795 /*this iop data does no chance to make me overflow again here, so just do it*/ 1796 while(iop_len>0) { 1797 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 1798 memcpy(pQbuffer, iop_data, 1); 1799 acb->rqbuf_lastindex++; 1800 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1801 /*if last index number set it to 0 */ 1802 iop_data++; 1803 iop_len--; 1804 } 1805 arcmsr_iop_message_read(acb); 1806 /*signature, let IOP know data has been readed */ 1807 } 1808 pcmdmessagefld->cmdmessage.Length=allxfer_len; 1809 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1810 retvalue=ARCMSR_MESSAGE_SUCCESS; 1811 } 1812 break; 1813 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1814 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1815 u_int8_t * pQbuffer; 1816 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 1817 1818 user_len=pcmdmessagefld->cmdmessage.Length; 1819 /*check if data xfer length of this request will overflow my array qbuffer */ 1820 wqbuf_lastindex=acb->wqbuf_lastindex; 1821 wqbuf_firstindex=acb->wqbuf_firstindex; 1822 if(wqbuf_lastindex!=wqbuf_firstindex) { 1823 arcmsr_post_ioctldata2iop(acb); 1824 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1825 } else { 1826 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); 1827 if(my_empty_len>=user_len) { 1828 while(user_len>0) { 1829 /*copy srb data to wqbuffer*/ 1830 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; 1831 memcpy(pQbuffer, ptmpuserbuffer, 1); 1832 acb->wqbuf_lastindex++; 1833 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 1834 /*if last index number set it to 0 */ 1835 ptmpuserbuffer++; 1836 user_len--; 1837 } 1838 /*post fist Qbuffer*/ 1839 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 1840 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; 1841 arcmsr_post_ioctldata2iop(acb); 1842 } 1843 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1844 } else { 1845 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1846 } 1847 } 1848 retvalue=ARCMSR_MESSAGE_SUCCESS; 1849 } 1850 break; 1851 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1852 u_int8_t * pQbuffer=acb->rqbuffer; 1853 1854 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1855 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1856 arcmsr_iop_message_read(acb); 1857 /*signature, let IOP know data has been readed */ 1858 } 1859 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 1860 acb->rqbuf_firstindex=0; 1861 acb->rqbuf_lastindex=0; 1862 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1863 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1864 retvalue=ARCMSR_MESSAGE_SUCCESS; 1865 } 1866 break; 1867 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: 1868 { 1869 u_int8_t * pQbuffer=acb->wqbuffer; 1870 1871 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1872 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1873 arcmsr_iop_message_read(acb); 1874 /*signature, let IOP know data has been readed */ 1875 } 1876 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); 1877 acb->wqbuf_firstindex=0; 1878 acb->wqbuf_lastindex=0; 1879 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 1880 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1881 retvalue=ARCMSR_MESSAGE_SUCCESS; 1882 } 1883 break; 1884 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1885 u_int8_t * pQbuffer; 1886 1887 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1888 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1889 arcmsr_iop_message_read(acb); 1890 /*signature, let IOP know data has been readed */ 1891 } 1892 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED 1893 |ACB_F_MESSAGE_RQBUFFER_CLEARED 1894 |ACB_F_MESSAGE_WQBUFFER_READ); 1895 acb->rqbuf_firstindex=0; 1896 acb->rqbuf_lastindex=0; 1897 acb->wqbuf_firstindex=0; 1898 acb->wqbuf_lastindex=0; 1899 pQbuffer=acb->rqbuffer; 1900 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1901 pQbuffer=acb->wqbuffer; 1902 memset(pQbuffer, 0, sizeof(struct QBUFFER)); 1903 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1904 retvalue=ARCMSR_MESSAGE_SUCCESS; 1905 } 1906 break; 1907 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 1908 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; 1909 retvalue=ARCMSR_MESSAGE_SUCCESS; 1910 } 1911 break; 1912 case ARCMSR_MESSAGE_SAY_HELLO: { 1913 u_int8_t * hello_string="Hello! I am ARCMSR"; 1914 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 1915 1916 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 1917 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1918 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1919 return ENOIOCTL; 1920 } 1921 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1922 retvalue=ARCMSR_MESSAGE_SUCCESS; 1923 } 1924 break; 1925 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1926 arcmsr_iop_parking(acb); 1927 retvalue=ARCMSR_MESSAGE_SUCCESS; 1928 } 1929 break; 1930 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1931 arcmsr_flush_adapter_cache(acb); 1932 retvalue=ARCMSR_MESSAGE_SUCCESS; 1933 } 1934 break; 1935 } 1936 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1937 return retvalue; 1938 } 1939 /* 1940 ************************************************************************** 1941 ************************************************************************** 1942 */ 1943 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1944 { 1945 struct CommandControlBlock *srb=NULL; 1946 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1947 1948 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1949 workingsrb_doneindex=acb->workingsrb_doneindex; 1950 workingsrb_startindex=acb->workingsrb_startindex; 1951 srb=acb->srbworkingQ[workingsrb_startindex]; 1952 workingsrb_startindex++; 1953 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; 1954 if(workingsrb_doneindex!=workingsrb_startindex) { 1955 acb->workingsrb_startindex=workingsrb_startindex; 1956 } else { 1957 srb=NULL; 1958 } 1959 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1960 return(srb); 1961 } 1962 /* 1963 ************************************************************************** 1964 ************************************************************************** 1965 */ 1966 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1967 { 1968 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1969 int retvalue = 0, transfer_len = 0; 1970 char *buffer; 1971 u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1972 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1973 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1974 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1975 /* 4 bytes: Areca io control code */ 1976 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1977 buffer = pccb->csio.data_ptr; 1978 transfer_len = pccb->csio.dxfer_len; 1979 } else { 1980 retvalue = ARCMSR_MESSAGE_FAIL; 1981 goto message_out; 1982 } 1983 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1984 retvalue = ARCMSR_MESSAGE_FAIL; 1985 goto message_out; 1986 } 1987 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1988 switch(controlcode) { 1989 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1990 u_int8_t *pQbuffer; 1991 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1992 int32_t allxfer_len = 0; 1993 1994 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1995 && (allxfer_len < 1031)) { 1996 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1997 memcpy(ptmpQbuffer, pQbuffer, 1); 1998 acb->rqbuf_firstindex++; 1999 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 2000 ptmpQbuffer++; 2001 allxfer_len++; 2002 } 2003 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2004 struct QBUFFER *prbuffer; 2005 u_int8_t *iop_data; 2006 int32_t iop_len; 2007 2008 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2009 prbuffer=arcmsr_get_iop_rqbuffer(acb); 2010 iop_data = (u_int8_t *)prbuffer->data; 2011 iop_len =(u_int32_t)prbuffer->data_len; 2012 while (iop_len > 0) { 2013 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 2014 memcpy(pQbuffer, iop_data, 1); 2015 acb->rqbuf_lastindex++; 2016 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 2017 iop_data++; 2018 iop_len--; 2019 } 2020 arcmsr_iop_message_read(acb); 2021 } 2022 pcmdmessagefld->cmdmessage.Length = allxfer_len; 2023 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2024 retvalue=ARCMSR_MESSAGE_SUCCESS; 2025 } 2026 break; 2027 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 2028 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 2029 u_int8_t *pQbuffer; 2030 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 2031 2032 user_len = pcmdmessagefld->cmdmessage.Length; 2033 wqbuf_lastindex = acb->wqbuf_lastindex; 2034 wqbuf_firstindex = acb->wqbuf_firstindex; 2035 if (wqbuf_lastindex != wqbuf_firstindex) { 2036 arcmsr_post_ioctldata2iop(acb); 2037 /* has error report sensedata */ 2038 if(&pccb->csio.sense_data) { 2039 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 2040 /* Valid,ErrorCode */ 2041 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 2042 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 2043 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 2044 /* AdditionalSenseLength */ 2045 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 2046 /* AdditionalSenseCode */ 2047 } 2048 retvalue = ARCMSR_MESSAGE_FAIL; 2049 } else { 2050 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 2051 &(ARCMSR_MAX_QBUFFER - 1); 2052 if (my_empty_len >= user_len) { 2053 while (user_len > 0) { 2054 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 2055 memcpy(pQbuffer, ptmpuserbuffer, 1); 2056 acb->wqbuf_lastindex++; 2057 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 2058 ptmpuserbuffer++; 2059 user_len--; 2060 } 2061 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 2062 acb->acb_flags &= 2063 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 2064 arcmsr_post_ioctldata2iop(acb); 2065 } 2066 } else { 2067 /* has error report sensedata */ 2068 if(&pccb->csio.sense_data) { 2069 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 2070 /* Valid,ErrorCode */ 2071 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 2072 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 2073 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 2074 /* AdditionalSenseLength */ 2075 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 2076 /* AdditionalSenseCode */ 2077 } 2078 retvalue = ARCMSR_MESSAGE_FAIL; 2079 } 2080 } 2081 } 2082 break; 2083 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 2084 u_int8_t *pQbuffer = acb->rqbuffer; 2085 2086 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2087 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2088 arcmsr_iop_message_read(acb); 2089 } 2090 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 2091 acb->rqbuf_firstindex = 0; 2092 acb->rqbuf_lastindex = 0; 2093 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 2094 pcmdmessagefld->cmdmessage.ReturnCode = 2095 ARCMSR_MESSAGE_RETURNCODE_OK; 2096 } 2097 break; 2098 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 2099 u_int8_t *pQbuffer = acb->wqbuffer; 2100 2101 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2102 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2103 arcmsr_iop_message_read(acb); 2104 } 2105 acb->acb_flags |= 2106 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 2107 ACB_F_MESSAGE_WQBUFFER_READ); 2108 acb->wqbuf_firstindex = 0; 2109 acb->wqbuf_lastindex = 0; 2110 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 2111 pcmdmessagefld->cmdmessage.ReturnCode = 2112 ARCMSR_MESSAGE_RETURNCODE_OK; 2113 } 2114 break; 2115 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 2116 u_int8_t *pQbuffer; 2117 2118 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2119 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2120 arcmsr_iop_message_read(acb); 2121 } 2122 acb->acb_flags |= 2123 (ACB_F_MESSAGE_WQBUFFER_CLEARED 2124 | ACB_F_MESSAGE_RQBUFFER_CLEARED 2125 | ACB_F_MESSAGE_WQBUFFER_READ); 2126 acb->rqbuf_firstindex = 0; 2127 acb->rqbuf_lastindex = 0; 2128 acb->wqbuf_firstindex = 0; 2129 acb->wqbuf_lastindex = 0; 2130 pQbuffer = acb->rqbuffer; 2131 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 2132 pQbuffer = acb->wqbuffer; 2133 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 2134 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2135 } 2136 break; 2137 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 2138 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 2139 } 2140 break; 2141 case ARCMSR_MESSAGE_SAY_HELLO: { 2142 int8_t * hello_string = "Hello! I am ARCMSR"; 2143 2144 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 2145 , (int16_t)strlen(hello_string)); 2146 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2147 } 2148 break; 2149 case ARCMSR_MESSAGE_SAY_GOODBYE: 2150 arcmsr_iop_parking(acb); 2151 break; 2152 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 2153 arcmsr_flush_adapter_cache(acb); 2154 break; 2155 default: 2156 retvalue = ARCMSR_MESSAGE_FAIL; 2157 } 2158 message_out: 2159 return retvalue; 2160 } 2161 /* 2162 ********************************************************************* 2163 ********************************************************************* 2164 */ 2165 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2166 { 2167 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 2168 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 2169 union ccb * pccb; 2170 int target, lun; 2171 2172 pccb=srb->pccb; 2173 target=pccb->ccb_h.target_id; 2174 lun=pccb->ccb_h.target_lun; 2175 if(error != 0) { 2176 if(error != EFBIG) { 2177 kprintf("arcmsr%d: unexpected error %x" 2178 " returned from 'bus_dmamap_load' \n" 2179 , acb->pci_unit, error); 2180 } 2181 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 2182 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 2183 } 2184 arcmsr_srb_complete(srb, 0); 2185 return; 2186 } 2187 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 2188 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 2189 arcmsr_srb_complete(srb, 0); 2190 return; 2191 } 2192 if(acb->acb_flags & ACB_F_BUS_RESET) { 2193 kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 2194 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 2195 arcmsr_srb_complete(srb, 0); 2196 return; 2197 } 2198 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 2199 u_int8_t block_cmd; 2200 2201 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 2202 if(block_cmd==0x08 || block_cmd==0x0a) { 2203 kprintf("arcmsr%d:block 'read/write' command " 2204 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 2205 , acb->pci_unit, block_cmd, target, lun); 2206 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 2207 arcmsr_srb_complete(srb, 0); 2208 return; 2209 } 2210 } 2211 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2212 if(nseg != 0) { 2213 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 2214 } 2215 arcmsr_srb_complete(srb, 0); 2216 return; 2217 } 2218 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 2219 xpt_freeze_simq(acb->psim, 1); 2220 pccb->ccb_h.status = CAM_REQUEUE_REQ; 2221 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; 2222 arcmsr_srb_complete(srb, 0); 2223 return; 2224 } 2225 pccb->ccb_h.status |= CAM_SIM_QUEUED; 2226 arcmsr_build_srb(srb, dm_segs, nseg); 2227 /* if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) 2228 callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz) / 1000, arcmsr_srb_timeout, srb); 2229 */ 2230 arcmsr_post_srb(acb, srb); 2231 return; 2232 } 2233 /* 2234 ***************************************************************************************** 2235 ***************************************************************************************** 2236 */ 2237 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 2238 { 2239 struct CommandControlBlock *srb; 2240 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 2241 u_int32_t intmask_org; 2242 int i=0; 2243 2244 acb->num_aborts++; 2245 /* 2246 *************************************************************************** 2247 ** It is the upper layer do abort command this lock just prior to calling us. 2248 ** First determine if we currently own this command. 2249 ** Start by searching the device queue. If not found 2250 ** at all, and the system wanted us to just abort the 2251 ** command return success. 2252 *************************************************************************** 2253 */ 2254 if(acb->srboutstandingcount!=0) { 2255 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 2256 srb=acb->psrb_pool[i]; 2257 if(srb->startdone==ARCMSR_SRB_START) { 2258 if(srb->pccb==abortccb) { 2259 srb->startdone=ARCMSR_SRB_ABORTED; 2260 kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 2261 "outstanding command \n" 2262 , acb->pci_unit, abortccb->ccb_h.target_id 2263 , abortccb->ccb_h.target_lun, srb); 2264 goto abort_outstanding_cmd; 2265 } 2266 } 2267 } 2268 } 2269 return(FALSE); 2270 abort_outstanding_cmd: 2271 /* disable all outbound interrupt */ 2272 intmask_org=arcmsr_disable_allintr(acb); 2273 arcmsr_polling_srbdone(acb, srb); 2274 /* enable outbound Post Queue, outbound doorbell Interrupt */ 2275 arcmsr_enable_allintr(acb, intmask_org); 2276 return (TRUE); 2277 } 2278 /* 2279 **************************************************************************** 2280 **************************************************************************** 2281 */ 2282 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 2283 { 2284 int retry=0; 2285 2286 acb->num_resets++; 2287 acb->acb_flags |=ACB_F_BUS_RESET; 2288 while(acb->srboutstandingcount!=0 && retry < 400) { 2289 arcmsr_interrupt(acb); 2290 UDELAY(25000); 2291 retry++; 2292 } 2293 arcmsr_iop_reset(acb); 2294 acb->acb_flags &= ~ACB_F_BUS_RESET; 2295 return; 2296 } 2297 /* 2298 ************************************************************************** 2299 ************************************************************************** 2300 */ 2301 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 2302 union ccb * pccb) 2303 { 2304 pccb->ccb_h.status |= CAM_REQ_CMP; 2305 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 2306 case INQUIRY: { 2307 unsigned char inqdata[36]; 2308 char *buffer=pccb->csio.data_ptr; 2309 2310 if (pccb->ccb_h.target_lun) { 2311 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2312 xpt_done(pccb); 2313 return; 2314 } 2315 inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ 2316 inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ 2317 inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ 2318 inqdata[3] = 0; 2319 inqdata[4] = 31; /* length of additional data */ 2320 inqdata[5] = 0; 2321 inqdata[6] = 0; 2322 inqdata[7] = 0; 2323 strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ 2324 strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ 2325 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 2326 memcpy(buffer, inqdata, sizeof(inqdata)); 2327 xpt_done(pccb); 2328 } 2329 break; 2330 case WRITE_BUFFER: 2331 case READ_BUFFER: { 2332 if (arcmsr_iop_message_xfer(acb, pccb)) { 2333 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2334 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 2335 } 2336 xpt_done(pccb); 2337 } 2338 break; 2339 default: 2340 xpt_done(pccb); 2341 } 2342 } 2343 /* 2344 ********************************************************************* 2345 ********************************************************************* 2346 */ 2347 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) 2348 { 2349 struct AdapterControlBlock * acb; 2350 2351 acb=(struct AdapterControlBlock *) cam_sim_softc(psim); 2352 if(acb==NULL) { 2353 pccb->ccb_h.status |= CAM_REQ_INVALID; 2354 xpt_done(pccb); 2355 return; 2356 } 2357 switch (pccb->ccb_h.func_code) { 2358 case XPT_SCSI_IO: { 2359 struct CommandControlBlock *srb; 2360 int target=pccb->ccb_h.target_id; 2361 2362 if(target == 16) { 2363 /* virtual device for iop message transfer */ 2364 arcmsr_handle_virtual_command(acb, pccb); 2365 return; 2366 } 2367 if((srb=arcmsr_get_freesrb(acb)) == NULL) { 2368 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; 2369 xpt_done(pccb); 2370 return; 2371 } 2372 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; 2373 pccb->ccb_h.arcmsr_ccbacb_ptr=acb; 2374 srb->pccb=pccb; 2375 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2376 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 2377 /* Single buffer */ 2378 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 2379 /* Buffer is virtual */ 2380 u_int32_t error; 2381 2382 crit_enter(); 2383 error = bus_dmamap_load(acb->dm_segs_dmat 2384 , srb->dm_segs_dmamap 2385 , pccb->csio.data_ptr 2386 , pccb->csio.dxfer_len 2387 , arcmsr_execute_srb, srb, /*flags*/0); 2388 if(error == EINPROGRESS) { 2389 xpt_freeze_simq(acb->psim, 1); 2390 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2391 } 2392 crit_exit(); 2393 } 2394 else { /* Buffer is physical */ 2395 struct bus_dma_segment seg; 2396 2397 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr; 2398 seg.ds_len = pccb->csio.dxfer_len; 2399 arcmsr_execute_srb(srb, &seg, 1, 0); 2400 } 2401 } else { 2402 /* Scatter/gather list */ 2403 struct bus_dma_segment *segs; 2404 2405 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 2406 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2407 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 2408 xpt_done(pccb); 2409 kfree(srb, M_DEVBUF); 2410 return; 2411 } 2412 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 2413 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0); 2414 } 2415 } else { 2416 arcmsr_execute_srb(srb, NULL, 0, 0); 2417 } 2418 break; 2419 } 2420 case XPT_TARGET_IO: { 2421 /* target mode not yet support vendor specific commands. */ 2422 pccb->ccb_h.status |= CAM_REQ_CMP; 2423 xpt_done(pccb); 2424 break; 2425 } 2426 case XPT_PATH_INQ: { 2427 struct ccb_pathinq *cpi= &pccb->cpi; 2428 2429 cpi->version_num=1; 2430 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; 2431 cpi->target_sprt=0; 2432 cpi->hba_misc=0; 2433 cpi->hba_eng_cnt=0; 2434 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ 2435 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ 2436 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ 2437 cpi->bus_id=cam_sim_bus(psim); 2438 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2439 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); 2440 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 2441 cpi->unit_number=cam_sim_unit(psim); 2442 #ifdef CAM_NEW_TRAN_CODE 2443 cpi->transport = XPORT_SPI; 2444 cpi->transport_version = 2; 2445 cpi->protocol = PROTO_SCSI; 2446 cpi->protocol_version = SCSI_REV_2; 2447 #endif 2448 cpi->ccb_h.status |= CAM_REQ_CMP; 2449 xpt_done(pccb); 2450 break; 2451 } 2452 case XPT_ABORT: { 2453 union ccb *pabort_ccb; 2454 2455 pabort_ccb=pccb->cab.abort_ccb; 2456 switch (pabort_ccb->ccb_h.func_code) { 2457 case XPT_ACCEPT_TARGET_IO: 2458 case XPT_IMMED_NOTIFY: 2459 case XPT_CONT_TARGET_IO: 2460 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { 2461 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; 2462 xpt_done(pabort_ccb); 2463 pccb->ccb_h.status |= CAM_REQ_CMP; 2464 } else { 2465 xpt_print_path(pabort_ccb->ccb_h.path); 2466 kprintf("Not found\n"); 2467 pccb->ccb_h.status |= CAM_PATH_INVALID; 2468 } 2469 break; 2470 case XPT_SCSI_IO: 2471 pccb->ccb_h.status |= CAM_UA_ABORT; 2472 break; 2473 default: 2474 pccb->ccb_h.status |= CAM_REQ_INVALID; 2475 break; 2476 } 2477 xpt_done(pccb); 2478 break; 2479 } 2480 case XPT_RESET_BUS: 2481 case XPT_RESET_DEV: { 2482 u_int32_t i; 2483 2484 arcmsr_bus_reset(acb); 2485 for (i=0; i < 500; i++) { 2486 DELAY(1000); 2487 } 2488 pccb->ccb_h.status |= CAM_REQ_CMP; 2489 xpt_done(pccb); 2490 break; 2491 } 2492 case XPT_TERM_IO: { 2493 pccb->ccb_h.status |= CAM_REQ_INVALID; 2494 xpt_done(pccb); 2495 break; 2496 } 2497 case XPT_GET_TRAN_SETTINGS: { 2498 struct ccb_trans_settings *cts; 2499 2500 if(pccb->ccb_h.target_id == 16) { 2501 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 2502 xpt_done(pccb); 2503 break; 2504 } 2505 cts= &pccb->cts; 2506 #ifdef CAM_NEW_TRAN_CODE 2507 { 2508 struct ccb_trans_settings_scsi *scsi; 2509 struct ccb_trans_settings_spi *spi; 2510 2511 scsi = &cts->proto_specific.scsi; 2512 spi = &cts->xport_specific.spi; 2513 cts->protocol = PROTO_SCSI; 2514 cts->protocol_version = SCSI_REV_2; 2515 cts->transport = XPORT_SPI; 2516 cts->transport_version = 2; 2517 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 2518 spi->sync_period=3; 2519 spi->sync_offset=32; 2520 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 2521 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2522 spi->valid = CTS_SPI_VALID_DISC 2523 | CTS_SPI_VALID_SYNC_RATE 2524 | CTS_SPI_VALID_SYNC_OFFSET 2525 | CTS_SPI_VALID_BUS_WIDTH; 2526 scsi->valid = CTS_SCSI_VALID_TQ; 2527 } 2528 #else 2529 { 2530 cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); 2531 cts->sync_period=3; 2532 cts->sync_offset=32; 2533 cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 2534 cts->valid=CCB_TRANS_SYNC_RATE_VALID | 2535 CCB_TRANS_SYNC_OFFSET_VALID | 2536 CCB_TRANS_BUS_WIDTH_VALID | 2537 CCB_TRANS_DISC_VALID | 2538 CCB_TRANS_TQ_VALID; 2539 } 2540 #endif 2541 pccb->ccb_h.status |= CAM_REQ_CMP; 2542 xpt_done(pccb); 2543 break; 2544 } 2545 case XPT_SET_TRAN_SETTINGS: { 2546 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 2547 xpt_done(pccb); 2548 break; 2549 } 2550 case XPT_CALC_GEOMETRY: { 2551 struct ccb_calc_geometry *ccg; 2552 u_int32_t size_mb; 2553 u_int32_t secs_per_cylinder; 2554 2555 if(pccb->ccb_h.target_id == 16) { 2556 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 2557 xpt_done(pccb); 2558 break; 2559 } 2560 ccg= &pccb->ccg; 2561 if (ccg->block_size == 0) { 2562 pccb->ccb_h.status = CAM_REQ_INVALID; 2563 xpt_done(pccb); 2564 break; 2565 } 2566 if(((1024L * 1024L)/ccg->block_size) < 0) { 2567 pccb->ccb_h.status = CAM_REQ_INVALID; 2568 xpt_done(pccb); 2569 break; 2570 } 2571 size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size); 2572 if(size_mb > 1024 ) { 2573 ccg->heads=255; 2574 ccg->secs_per_track=63; 2575 } else { 2576 ccg->heads=64; 2577 ccg->secs_per_track=32; 2578 } 2579 secs_per_cylinder=ccg->heads * ccg->secs_per_track; 2580 ccg->cylinders=ccg->volume_size / secs_per_cylinder; 2581 pccb->ccb_h.status |= CAM_REQ_CMP; 2582 xpt_done(pccb); 2583 break; 2584 } 2585 default: 2586 pccb->ccb_h.status |= CAM_REQ_INVALID; 2587 xpt_done(pccb); 2588 break; 2589 } 2590 return; 2591 } 2592 /* 2593 ********************************************************************** 2594 ********************************************************************** 2595 */ 2596 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) 2597 { 2598 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2599 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); 2600 if(!arcmsr_hba_wait_msgint_ready(acb)) { 2601 kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 2602 } 2603 return; 2604 } 2605 /* 2606 ********************************************************************** 2607 ********************************************************************** 2608 */ 2609 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) 2610 { 2611 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2612 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); 2613 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 2614 kprintf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 2615 } 2616 return; 2617 } 2618 /* 2619 ********************************************************************** 2620 ********************************************************************** 2621 */ 2622 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) 2623 { 2624 acb->acb_flags |= ACB_F_MSG_START_BGRB; 2625 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); 2626 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 2627 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 2628 kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit); 2629 } 2630 return; 2631 } 2632 /* 2633 ********************************************************************** 2634 ********************************************************************** 2635 */ 2636 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 2637 { 2638 switch (acb->adapter_type) { 2639 case ACB_ADAPTER_TYPE_A: 2640 arcmsr_start_hba_bgrb(acb); 2641 break; 2642 case ACB_ADAPTER_TYPE_B: 2643 arcmsr_start_hbb_bgrb(acb); 2644 break; 2645 case ACB_ADAPTER_TYPE_C: 2646 arcmsr_start_hbc_bgrb(acb); 2647 break; 2648 } 2649 return; 2650 } 2651 /* 2652 ********************************************************************** 2653 ** 2654 ********************************************************************** 2655 */ 2656 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2657 { 2658 struct CommandControlBlock *srb; 2659 u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; 2660 u_int16_t error; 2661 2662 polling_ccb_retry: 2663 poll_count++; 2664 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; 2665 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ 2666 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2667 while(1) { 2668 if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 2669 0, outbound_queueport))==0xFFFFFFFF) { 2670 if(poll_srb_done) { 2671 break;/*chip FIFO no ccb for completion already*/ 2672 } else { 2673 UDELAY(25000); 2674 if ((poll_count > 100) && (poll_srb != NULL)) { 2675 break; 2676 } 2677 goto polling_ccb_retry; 2678 } 2679 } 2680 /* check if command done with no error*/ 2681 srb=(struct CommandControlBlock *) 2682 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ 2683 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 2684 poll_srb_done = (srb==poll_srb) ? 1:0; 2685 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2686 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2687 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 2688 "poll command abort successfully \n" 2689 , acb->pci_unit 2690 , srb->pccb->ccb_h.target_id 2691 , srb->pccb->ccb_h.target_lun, srb); 2692 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2693 arcmsr_srb_complete(srb, 1); 2694 continue; 2695 } 2696 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'" 2697 "srboutstandingcount=%d \n" 2698 , acb->pci_unit 2699 , srb, acb->srboutstandingcount); 2700 continue; 2701 } 2702 arcmsr_report_srb_state(acb, srb, error); 2703 } /*drain reply FIFO*/ 2704 return; 2705 } 2706 /* 2707 ********************************************************************** 2708 ** 2709 ********************************************************************** 2710 */ 2711 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2712 { 2713 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 2714 struct CommandControlBlock *srb; 2715 u_int32_t flag_srb, poll_srb_done=0, poll_count=0; 2716 int index; 2717 u_int16_t error; 2718 2719 polling_ccb_retry: 2720 poll_count++; 2721 CHIP_REG_WRITE32(HBB_DOORBELL, 2722 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ 2723 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2724 while(1) { 2725 index=phbbmu->doneq_index; 2726 if((flag_srb=phbbmu->done_qbuffer[index]) == 0) { 2727 if(poll_srb_done) { 2728 break;/*chip FIFO no ccb for completion already*/ 2729 } else { 2730 UDELAY(25000); 2731 if ((poll_count > 100) && (poll_srb != NULL)) { 2732 break; 2733 } 2734 goto polling_ccb_retry; 2735 } 2736 } 2737 phbbmu->done_qbuffer[index]=0; 2738 index++; 2739 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ 2740 phbbmu->doneq_index=index; 2741 /* check if command done with no error*/ 2742 srb=(struct CommandControlBlock *) 2743 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ 2744 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 2745 poll_srb_done = (srb==poll_srb) ? 1:0; 2746 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2747 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2748 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 2749 "poll command abort successfully \n" 2750 , acb->pci_unit 2751 , srb->pccb->ccb_h.target_id 2752 , srb->pccb->ccb_h.target_lun, srb); 2753 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2754 arcmsr_srb_complete(srb, 1); 2755 continue; 2756 } 2757 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'" 2758 "srboutstandingcount=%d \n" 2759 , acb->pci_unit 2760 , srb, acb->srboutstandingcount); 2761 continue; 2762 } 2763 arcmsr_report_srb_state(acb, srb, error); 2764 } /*drain reply FIFO*/ 2765 return; 2766 } 2767 /* 2768 ********************************************************************** 2769 ** 2770 ********************************************************************** 2771 */ 2772 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2773 { 2774 struct CommandControlBlock *srb; 2775 u_int32_t flag_srb, poll_srb_done=0, poll_count=0; 2776 u_int16_t error; 2777 2778 polling_ccb_retry: 2779 poll_count++; 2780 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2781 while(1) { 2782 if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { 2783 if(poll_srb_done) { 2784 break;/*chip FIFO no ccb for completion already*/ 2785 } else { 2786 UDELAY(25000); 2787 if ((poll_count > 100) && (poll_srb != NULL)) { 2788 break; 2789 } 2790 if (acb->srboutstandingcount == 0) { 2791 break; 2792 } 2793 goto polling_ccb_retry; 2794 } 2795 } 2796 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 2797 /* check if command done with no error*/ 2798 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/ 2799 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 2800 if (poll_srb != NULL) 2801 poll_srb_done = (srb==poll_srb) ? 1:0; 2802 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2803 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2804 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n" 2805 , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb); 2806 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2807 arcmsr_srb_complete(srb, 1); 2808 continue; 2809 } 2810 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" 2811 , acb->pci_unit, srb, acb->srboutstandingcount); 2812 continue; 2813 } 2814 arcmsr_report_srb_state(acb, srb, error); 2815 } /*drain reply FIFO*/ 2816 return; 2817 } 2818 /* 2819 ********************************************************************** 2820 ********************************************************************** 2821 */ 2822 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2823 { 2824 switch (acb->adapter_type) { 2825 case ACB_ADAPTER_TYPE_A: { 2826 arcmsr_polling_hba_srbdone(acb, poll_srb); 2827 } 2828 break; 2829 case ACB_ADAPTER_TYPE_B: { 2830 arcmsr_polling_hbb_srbdone(acb, poll_srb); 2831 } 2832 break; 2833 case ACB_ADAPTER_TYPE_C: { 2834 arcmsr_polling_hbc_srbdone(acb, poll_srb); 2835 } 2836 break; 2837 } 2838 } 2839 /* 2840 ********************************************************************** 2841 ********************************************************************** 2842 */ 2843 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 2844 { 2845 char *acb_firm_model=acb->firm_model; 2846 char *acb_firm_version=acb->firm_version; 2847 char *acb_device_map = acb->device_map; 2848 size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2849 size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2850 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2851 int i; 2852 2853 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 2854 if(!arcmsr_hba_wait_msgint_ready(acb)) { 2855 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 2856 } 2857 i=0; 2858 while(i<8) { 2859 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 2860 /* 8 bytes firm_model, 15, 60-67*/ 2861 acb_firm_model++; 2862 i++; 2863 } 2864 i=0; 2865 while(i<16) { 2866 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 2867 /* 16 bytes firm_version, 17, 68-83*/ 2868 acb_firm_version++; 2869 i++; 2870 } 2871 i=0; 2872 while(i<16) { 2873 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 2874 acb_device_map++; 2875 i++; 2876 } 2877 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2878 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2879 acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2880 acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2881 acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2882 acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2883 acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2884 return; 2885 } 2886 /* 2887 ********************************************************************** 2888 ********************************************************************** 2889 */ 2890 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 2891 { 2892 char *acb_firm_model=acb->firm_model; 2893 char *acb_firm_version=acb->firm_version; 2894 char *acb_device_map = acb->device_map; 2895 size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2896 size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2897 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2898 int i; 2899 2900 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); 2901 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 2902 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 2903 } 2904 i=0; 2905 while(i<8) { 2906 *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); 2907 /* 8 bytes firm_model, 15, 60-67*/ 2908 acb_firm_model++; 2909 i++; 2910 } 2911 i=0; 2912 while(i<16) { 2913 *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); 2914 /* 16 bytes firm_version, 17, 68-83*/ 2915 acb_firm_version++; 2916 i++; 2917 } 2918 i=0; 2919 while(i<16) { 2920 *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); 2921 acb_device_map++; 2922 i++; 2923 } 2924 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2925 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2926 acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2927 acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2928 acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2929 acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2930 acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2931 return; 2932 } 2933 /* 2934 ********************************************************************** 2935 ********************************************************************** 2936 */ 2937 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) 2938 { 2939 char *acb_firm_model=acb->firm_model; 2940 char *acb_firm_version=acb->firm_version; 2941 char *acb_device_map = acb->device_map; 2942 size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2943 size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2944 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2945 int i; 2946 2947 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 2948 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 2949 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 2950 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 2951 } 2952 i=0; 2953 while(i<8) { 2954 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 2955 /* 8 bytes firm_model, 15, 60-67*/ 2956 acb_firm_model++; 2957 i++; 2958 } 2959 i=0; 2960 while(i<16) { 2961 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 2962 /* 16 bytes firm_version, 17, 68-83*/ 2963 acb_firm_version++; 2964 i++; 2965 } 2966 i=0; 2967 while(i<16) { 2968 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 2969 acb_device_map++; 2970 i++; 2971 } 2972 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2973 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2974 acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2975 acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2976 acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2977 acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2978 acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2979 return; 2980 } 2981 /* 2982 ********************************************************************** 2983 ********************************************************************** 2984 */ 2985 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 2986 { 2987 switch (acb->adapter_type) { 2988 case ACB_ADAPTER_TYPE_A: { 2989 arcmsr_get_hba_config(acb); 2990 } 2991 break; 2992 case ACB_ADAPTER_TYPE_B: { 2993 arcmsr_get_hbb_config(acb); 2994 } 2995 break; 2996 case ACB_ADAPTER_TYPE_C: { 2997 arcmsr_get_hbc_config(acb); 2998 } 2999 break; 3000 } 3001 return; 3002 } 3003 /* 3004 ********************************************************************** 3005 ********************************************************************** 3006 */ 3007 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) 3008 { 3009 int timeout=0; 3010 3011 switch (acb->adapter_type) { 3012 case ACB_ADAPTER_TYPE_A: { 3013 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) 3014 { 3015 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3016 { 3017 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); 3018 return; 3019 } 3020 UDELAY(15000); /* wait 15 milli-seconds */ 3021 } 3022 } 3023 break; 3024 case ACB_ADAPTER_TYPE_B: { 3025 while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) 3026 { 3027 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3028 { 3029 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); 3030 return; 3031 } 3032 UDELAY(15000); /* wait 15 milli-seconds */ 3033 } 3034 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 3035 } 3036 break; 3037 case ACB_ADAPTER_TYPE_C: { 3038 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) 3039 { 3040 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3041 { 3042 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); 3043 return; 3044 } 3045 UDELAY(15000); /* wait 15 milli-seconds */ 3046 } 3047 } 3048 break; 3049 } 3050 return; 3051 } 3052 /* 3053 ********************************************************************** 3054 ********************************************************************** 3055 */ 3056 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) 3057 { 3058 u_int32_t outbound_doorbell; 3059 3060 switch (acb->adapter_type) { 3061 case ACB_ADAPTER_TYPE_A: { 3062 /* empty doorbell Qbuffer if door bell ringed */ 3063 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); 3064 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ 3065 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 3066 3067 } 3068 break; 3069 case ACB_ADAPTER_TYPE_B: { 3070 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ 3071 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); 3072 /* let IOP know data has been read */ 3073 } 3074 break; 3075 case ACB_ADAPTER_TYPE_C: { 3076 /* empty doorbell Qbuffer if door bell ringed */ 3077 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); 3078 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ 3079 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 3080 3081 } 3082 break; 3083 } 3084 return; 3085 } 3086 /* 3087 ************************************************************************ 3088 ************************************************************************ 3089 */ 3090 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) 3091 { 3092 unsigned long srb_phyaddr; 3093 u_int32_t srb_phyaddr_hi32; 3094 3095 /* 3096 ******************************************************************** 3097 ** here we need to tell iop 331 our freesrb.HighPart 3098 ** if freesrb.HighPart is not zero 3099 ******************************************************************** 3100 */ 3101 srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr; 3102 // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16); 3103 srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high; 3104 switch (acb->adapter_type) { 3105 case ACB_ADAPTER_TYPE_A: { 3106 if(srb_phyaddr_hi32!=0) { 3107 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 3108 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 3109 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 3110 if(!arcmsr_hba_wait_msgint_ready(acb)) { 3111 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 3112 return FALSE; 3113 } 3114 } 3115 } 3116 break; 3117 /* 3118 *********************************************************************** 3119 ** if adapter type B, set window of "post command Q" 3120 *********************************************************************** 3121 */ 3122 case ACB_ADAPTER_TYPE_B: { 3123 u_int32_t post_queue_phyaddr; 3124 struct HBB_MessageUnit *phbbmu; 3125 3126 phbbmu=(struct HBB_MessageUnit *)acb->pmu; 3127 phbbmu->postq_index=0; 3128 phbbmu->doneq_index=0; 3129 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); 3130 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3131 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); 3132 return FALSE; 3133 } 3134 post_queue_phyaddr = srb_phyaddr + ARCMSR_MAX_FREESRB_NUM*sizeof(struct CommandControlBlock) 3135 + offsetof(struct HBB_MessageUnit, post_qbuffer); 3136 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ 3137 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ 3138 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ 3139 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ 3140 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ 3141 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); 3142 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3143 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); 3144 return FALSE; 3145 } 3146 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); 3147 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3148 kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit); 3149 return FALSE; 3150 } 3151 } 3152 break; 3153 case ACB_ADAPTER_TYPE_C: { 3154 if(srb_phyaddr_hi32!=0) { 3155 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 3156 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 3157 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 3158 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 3159 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 3160 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 3161 return FALSE; 3162 } 3163 } 3164 } 3165 break; 3166 } 3167 return TRUE; 3168 } 3169 /* 3170 ************************************************************************ 3171 ************************************************************************ 3172 */ 3173 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) 3174 { 3175 switch (acb->adapter_type) 3176 { 3177 case ACB_ADAPTER_TYPE_A: 3178 case ACB_ADAPTER_TYPE_C: 3179 break; 3180 case ACB_ADAPTER_TYPE_B: { 3181 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE); 3182 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3183 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); 3184 3185 return; 3186 } 3187 } 3188 break; 3189 } 3190 return; 3191 } 3192 /* 3193 ********************************************************************** 3194 ********************************************************************** 3195 */ 3196 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 3197 { 3198 u_int32_t intmask_org; 3199 3200 /* disable all outbound interrupt */ 3201 intmask_org=arcmsr_disable_allintr(acb); 3202 arcmsr_wait_firmware_ready(acb); 3203 arcmsr_iop_confirm(acb); 3204 arcmsr_get_firmware_spec(acb); 3205 /*start background rebuild*/ 3206 arcmsr_start_adapter_bgrb(acb); 3207 /* empty doorbell Qbuffer if door bell ringed */ 3208 arcmsr_clear_doorbell_queue_buffer(acb); 3209 arcmsr_enable_eoi_mode(acb); 3210 /* enable outbound Post Queue, outbound doorbell Interrupt */ 3211 arcmsr_enable_allintr(acb, intmask_org); 3212 acb->acb_flags |=ACB_F_IOP_INITED; 3213 return; 3214 } 3215 /* 3216 ********************************************************************** 3217 ********************************************************************** 3218 */ 3219 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3220 { 3221 struct AdapterControlBlock *acb=arg; 3222 struct CommandControlBlock *srb_tmp; 3223 u_int8_t * dma_memptr; 3224 u_int32_t i; 3225 unsigned long srb_phyaddr=(unsigned long)segs->ds_addr; 3226 3227 dma_memptr=acb->uncacheptr; 3228 acb->srb_phyaddr.phyaddr=srb_phyaddr; 3229 srb_tmp=(struct CommandControlBlock *)dma_memptr; 3230 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 3231 if(bus_dmamap_create(acb->dm_segs_dmat, 3232 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) { 3233 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; 3234 kprintf("arcmsr%d:" 3235 " srb dmamap bus_dmamap_create error\n", acb->pci_unit); 3236 return; 3237 } 3238 srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5); 3239 srb_tmp->acb=acb; 3240 acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp; 3241 srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock); 3242 srb_tmp++; 3243 } 3244 acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr; 3245 return; 3246 } 3247 /* 3248 ************************************************************************ 3249 ** 3250 ** 3251 ************************************************************************ 3252 */ 3253 static void arcmsr_free_resource(struct AdapterControlBlock *acb) 3254 { 3255 /* remove the control device */ 3256 if(acb->ioctl_dev != NULL) { 3257 destroy_dev(acb->ioctl_dev); 3258 } 3259 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); 3260 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); 3261 bus_dma_tag_destroy(acb->srb_dmat); 3262 bus_dma_tag_destroy(acb->dm_segs_dmat); 3263 bus_dma_tag_destroy(acb->parent_dmat); 3264 return; 3265 } 3266 /* 3267 ************************************************************************ 3268 ************************************************************************ 3269 */ 3270 static u_int32_t arcmsr_initialize(device_t dev) 3271 { 3272 struct AdapterControlBlock *acb=device_get_softc(dev); 3273 u_int16_t pci_command; 3274 int i, j,max_coherent_size; 3275 3276 switch (pci_get_devid(dev)) { 3277 case PCIDevVenIDARC1880: { 3278 acb->adapter_type=ACB_ADAPTER_TYPE_C; 3279 max_coherent_size=ARCMSR_SRBS_POOL_SIZE; 3280 } 3281 break; 3282 case PCIDevVenIDARC1200: 3283 case PCIDevVenIDARC1201: { 3284 acb->adapter_type=ACB_ADAPTER_TYPE_B; 3285 max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit)); 3286 } 3287 break; 3288 case PCIDevVenIDARC1110: 3289 case PCIDevVenIDARC1120: 3290 case PCIDevVenIDARC1130: 3291 case PCIDevVenIDARC1160: 3292 case PCIDevVenIDARC1170: 3293 case PCIDevVenIDARC1210: 3294 case PCIDevVenIDARC1220: 3295 case PCIDevVenIDARC1230: 3296 case PCIDevVenIDARC1231: 3297 case PCIDevVenIDARC1260: 3298 case PCIDevVenIDARC1261: 3299 case PCIDevVenIDARC1270: 3300 case PCIDevVenIDARC1280: 3301 case PCIDevVenIDARC1212: 3302 case PCIDevVenIDARC1222: 3303 case PCIDevVenIDARC1380: 3304 case PCIDevVenIDARC1381: 3305 case PCIDevVenIDARC1680: 3306 case PCIDevVenIDARC1681: { 3307 acb->adapter_type=ACB_ADAPTER_TYPE_A; 3308 max_coherent_size=ARCMSR_SRBS_POOL_SIZE; 3309 } 3310 break; 3311 default: { 3312 kprintf("arcmsr%d:" 3313 " unknown RAID adapter type \n", device_get_unit(dev)); 3314 return ENOMEM; 3315 } 3316 } 3317 if(bus_dma_tag_create( /*parent*/ NULL, 3318 /*alignemnt*/ 1, 3319 /*boundary*/ 0, 3320 /*lowaddr*/ BUS_SPACE_MAXADDR, 3321 /*highaddr*/ BUS_SPACE_MAXADDR, 3322 /*filter*/ NULL, 3323 /*filterarg*/ NULL, 3324 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, 3325 /*nsegments*/ BUS_SPACE_UNRESTRICTED, 3326 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 3327 /*flags*/ 0, 3328 &acb->parent_dmat) != 0) 3329 { 3330 kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); 3331 return ENOMEM; 3332 } 3333 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ 3334 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 3335 /*alignment*/ 1, 3336 /*boundary*/ 0, 3337 /*lowaddr*/ BUS_SPACE_MAXADDR, 3338 /*highaddr*/ BUS_SPACE_MAXADDR, 3339 /*filter*/ NULL, 3340 /*filterarg*/ NULL, 3341 /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, 3342 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, 3343 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 3344 /*flags*/ 0, 3345 &acb->dm_segs_dmat) != 0) 3346 { 3347 bus_dma_tag_destroy(acb->parent_dmat); 3348 kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); 3349 return ENOMEM; 3350 } 3351 /* DMA tag for our srb structures.... Allocate the freesrb memory */ 3352 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, 3353 /*alignment*/ 0x20, 3354 /*boundary*/ 0, 3355 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, 3356 /*highaddr*/ BUS_SPACE_MAXADDR, 3357 /*filter*/ NULL, 3358 /*filterarg*/ NULL, 3359 /*maxsize*/ max_coherent_size, 3360 /*nsegments*/ 1, 3361 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, 3362 /*flags*/ 0, 3363 &acb->srb_dmat) != 0) 3364 { 3365 bus_dma_tag_destroy(acb->dm_segs_dmat); 3366 bus_dma_tag_destroy(acb->parent_dmat); 3367 kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); 3368 return ENXIO; 3369 } 3370 /* Allocation for our srbs */ 3371 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { 3372 bus_dma_tag_destroy(acb->srb_dmat); 3373 bus_dma_tag_destroy(acb->dm_segs_dmat); 3374 bus_dma_tag_destroy(acb->parent_dmat); 3375 kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); 3376 return ENXIO; 3377 } 3378 /* And permanently map them */ 3379 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { 3380 bus_dma_tag_destroy(acb->srb_dmat); 3381 bus_dma_tag_destroy(acb->dm_segs_dmat); 3382 bus_dma_tag_destroy(acb->parent_dmat); 3383 kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); 3384 return ENXIO; 3385 } 3386 pci_command=pci_read_config(dev, PCIR_COMMAND, 2); 3387 pci_command |= PCIM_CMD_BUSMASTEREN; 3388 pci_command |= PCIM_CMD_PERRESPEN; 3389 pci_command |= PCIM_CMD_MWRICEN; 3390 /* Enable Busmaster/Mem */ 3391 pci_command |= PCIM_CMD_MEMEN; 3392 pci_write_config(dev, PCIR_COMMAND, pci_command, 2); 3393 switch(acb->adapter_type) { 3394 case ACB_ADAPTER_TYPE_A: { 3395 u_int32_t rid0=PCIR_BAR(0); 3396 vm_offset_t mem_base0; 3397 3398 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE); 3399 if(acb->sys_res_arcmsr[0] == NULL) { 3400 arcmsr_free_resource(acb); 3401 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); 3402 return ENOMEM; 3403 } 3404 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { 3405 arcmsr_free_resource(acb); 3406 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); 3407 return ENXIO; 3408 } 3409 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); 3410 if(mem_base0==0) { 3411 arcmsr_free_resource(acb); 3412 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); 3413 return ENXIO; 3414 } 3415 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); 3416 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); 3417 acb->pmu=(struct MessageUnit_UNION *)mem_base0; 3418 } 3419 break; 3420 case ACB_ADAPTER_TYPE_B: { 3421 struct HBB_MessageUnit *phbbmu; 3422 struct CommandControlBlock *freesrb; 3423 u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; 3424 vm_offset_t mem_base[]={0,0}; 3425 for(i=0; i<2; i++) { 3426 if(i==0) { 3427 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i], 3428 0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE); 3429 } else { 3430 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i], 3431 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE); 3432 } 3433 if(acb->sys_res_arcmsr[i] == NULL) { 3434 arcmsr_free_resource(acb); 3435 kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); 3436 return ENOMEM; 3437 } 3438 if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { 3439 arcmsr_free_resource(acb); 3440 kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); 3441 return ENXIO; 3442 } 3443 mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); 3444 if(mem_base[i]==0) { 3445 arcmsr_free_resource(acb); 3446 kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); 3447 return ENXIO; 3448 } 3449 acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]); 3450 acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]); 3451 } 3452 freesrb=(struct CommandControlBlock *)acb->uncacheptr; 3453 acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM]; 3454 phbbmu=(struct HBB_MessageUnit *)acb->pmu; 3455 phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0]; 3456 phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1]; 3457 } 3458 break; 3459 case ACB_ADAPTER_TYPE_C: { 3460 u_int32_t rid0=PCIR_BAR(1); 3461 vm_offset_t mem_base0; 3462 3463 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE); 3464 if(acb->sys_res_arcmsr[0] == NULL) { 3465 arcmsr_free_resource(acb); 3466 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); 3467 return ENOMEM; 3468 } 3469 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { 3470 arcmsr_free_resource(acb); 3471 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); 3472 return ENXIO; 3473 } 3474 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); 3475 if(mem_base0==0) { 3476 arcmsr_free_resource(acb); 3477 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); 3478 return ENXIO; 3479 } 3480 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); 3481 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); 3482 acb->pmu=(struct MessageUnit_UNION *)mem_base0; 3483 } 3484 break; 3485 } 3486 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { 3487 arcmsr_free_resource(acb); 3488 kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); 3489 return ENXIO; 3490 } 3491 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); 3492 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 3493 /* 3494 ******************************************************************** 3495 ** init raid volume state 3496 ******************************************************************** 3497 */ 3498 for(i=0;i<ARCMSR_MAX_TARGETID;i++) { 3499 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { 3500 acb->devstate[i][j]=ARECA_RAID_GONE; 3501 } 3502 } 3503 arcmsr_iop_init(acb); 3504 return(0); 3505 } 3506 /* 3507 ************************************************************************ 3508 ************************************************************************ 3509 */ 3510 static int arcmsr_attach(device_t dev) 3511 { 3512 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 3513 u_int32_t unit=device_get_unit(dev); 3514 struct ccb_setasync csa; 3515 struct cam_devq *devq; /* Device Queue to use for this SIM */ 3516 struct resource *irqres; 3517 int rid; 3518 u_int irq_flags; 3519 3520 if(acb == NULL) { 3521 kprintf("arcmsr%d: cannot allocate softc\n", unit); 3522 return (ENOMEM); 3523 } 3524 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock"); 3525 if(arcmsr_initialize(dev)) { 3526 kprintf("arcmsr%d: initialize failure!\n", unit); 3527 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3528 return ENXIO; 3529 } 3530 /* After setting up the adapter, map our interrupt */ 3531 rid=0; 3532 acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid, 3533 &irq_flags); 3534 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, 3535 irq_flags); 3536 if(irqres == NULL || 3537 bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) { 3538 arcmsr_free_resource(acb); 3539 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3540 kprintf("arcmsr%d: unable to register interrupt handler!\n", unit); 3541 return ENXIO; 3542 } 3543 acb->irqres=irqres; 3544 acb->pci_dev=dev; 3545 acb->pci_unit=unit; 3546 /* 3547 * Now let the CAM generic SCSI layer find the SCSI devices on 3548 * the bus * start queue to reset to the idle loop. * 3549 * Create device queue of SIM(s) * (MAX_START_JOB - 1) : 3550 * max_sim_transactions 3551 */ 3552 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB); 3553 if(devq == NULL) { 3554 arcmsr_free_resource(acb); 3555 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3556 if (acb->irq_type == PCI_INTR_TYPE_MSI) 3557 pci_release_msi(dev); 3558 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3559 kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit); 3560 return ENXIO; 3561 } 3562 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); 3563 if(acb->psim == NULL) { 3564 arcmsr_free_resource(acb); 3565 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3566 if (acb->irq_type == PCI_INTR_TYPE_MSI) 3567 pci_release_msi(dev); 3568 cam_simq_release(devq); 3569 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3570 kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit); 3571 return ENXIO; 3572 } 3573 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 3574 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { 3575 arcmsr_free_resource(acb); 3576 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3577 if (acb->irq_type == PCI_INTR_TYPE_MSI) 3578 pci_release_msi(dev); 3579 cam_sim_free(acb->psim); 3580 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3581 kprintf("arcmsr%d: xpt_bus_register failure!\n", unit); 3582 return ENXIO; 3583 } 3584 if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3585 arcmsr_free_resource(acb); 3586 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3587 if (acb->irq_type == PCI_INTR_TYPE_MSI) 3588 pci_release_msi(dev); 3589 xpt_bus_deregister(cam_sim_path(acb->psim)); 3590 cam_sim_free(acb->psim); 3591 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3592 kprintf("arcmsr%d: xpt_create_path failure!\n", unit); 3593 return ENXIO; 3594 } 3595 /* 3596 **************************************************** 3597 */ 3598 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); 3599 csa.ccb_h.func_code=XPT_SASYNC_CB; 3600 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE; 3601 csa.callback=arcmsr_async; 3602 csa.callback_arg=acb->psim; 3603 xpt_action((union ccb *)&csa); 3604 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 3605 /* Create the control device. */ 3606 acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); 3607 3608 acb->ioctl_dev->si_drv1=acb; 3609 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); 3610 callout_init(&acb->devmap_callout); 3611 callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); 3612 return 0; 3613 } 3614 /* 3615 ************************************************************************ 3616 ************************************************************************ 3617 */ 3618 static int arcmsr_probe(device_t dev) 3619 { 3620 u_int32_t id; 3621 static char buf[256]; 3622 char x_type[]={"X-TYPE"}; 3623 char *type; 3624 int raid6 = 1; 3625 3626 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { 3627 return (ENXIO); 3628 } 3629 switch(id=pci_get_devid(dev)) { 3630 case PCIDevVenIDARC1110: 3631 case PCIDevVenIDARC1200: 3632 case PCIDevVenIDARC1201: 3633 case PCIDevVenIDARC1210: 3634 raid6 = 0; 3635 /*FALLTHRU*/ 3636 case PCIDevVenIDARC1120: 3637 case PCIDevVenIDARC1130: 3638 case PCIDevVenIDARC1160: 3639 case PCIDevVenIDARC1170: 3640 case PCIDevVenIDARC1220: 3641 case PCIDevVenIDARC1230: 3642 case PCIDevVenIDARC1231: 3643 case PCIDevVenIDARC1260: 3644 case PCIDevVenIDARC1261: 3645 case PCIDevVenIDARC1270: 3646 case PCIDevVenIDARC1280: 3647 type = "SATA"; 3648 break; 3649 case PCIDevVenIDARC1212: 3650 case PCIDevVenIDARC1222: 3651 case PCIDevVenIDARC1380: 3652 case PCIDevVenIDARC1381: 3653 case PCIDevVenIDARC1680: 3654 case PCIDevVenIDARC1681: 3655 type = "SAS 3G"; 3656 break; 3657 case PCIDevVenIDARC1880: 3658 type = "SAS 6G"; 3659 break; 3660 default: 3661 type = x_type; 3662 break; 3663 } 3664 if(type == x_type) 3665 return(ENXIO); 3666 ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : ""); 3667 device_set_desc_copy(dev, buf); 3668 return 0; 3669 } 3670 /* 3671 ************************************************************************ 3672 ************************************************************************ 3673 */ 3674 static int arcmsr_shutdown(device_t dev) 3675 { 3676 u_int32_t i; 3677 u_int32_t intmask_org; 3678 struct CommandControlBlock *srb; 3679 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 3680 3681 /* stop adapter background rebuild */ 3682 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 3683 /* disable all outbound interrupt */ 3684 intmask_org=arcmsr_disable_allintr(acb); 3685 arcmsr_stop_adapter_bgrb(acb); 3686 arcmsr_flush_adapter_cache(acb); 3687 /* abort all outstanding command */ 3688 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 3689 acb->acb_flags &= ~ACB_F_IOP_INITED; 3690 if(acb->srboutstandingcount!=0) { 3691 /*clear and abort all outbound posted Q*/ 3692 arcmsr_done4abort_postqueue(acb); 3693 /* talk to iop 331 outstanding command aborted*/ 3694 arcmsr_abort_allcmd(acb); 3695 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 3696 srb=acb->psrb_pool[i]; 3697 if(srb->startdone==ARCMSR_SRB_START) { 3698 srb->startdone=ARCMSR_SRB_ABORTED; 3699 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 3700 arcmsr_srb_complete(srb, 1); 3701 } 3702 } 3703 } 3704 atomic_set_int(&acb->srboutstandingcount, 0); 3705 acb->workingsrb_doneindex=0; 3706 acb->workingsrb_startindex=0; 3707 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 3708 return (0); 3709 } 3710 /* 3711 ************************************************************************ 3712 ************************************************************************ 3713 */ 3714 static int arcmsr_detach(device_t dev) 3715 { 3716 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 3717 int i; 3718 3719 callout_stop(&acb->devmap_callout); 3720 bus_teardown_intr(dev, acb->irqres, acb->ih); 3721 arcmsr_shutdown(dev); 3722 arcmsr_free_resource(acb); 3723 for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { 3724 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); 3725 } 3726 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3727 if (acb->irq_type == PCI_INTR_TYPE_MSI) 3728 pci_release_msi(dev); 3729 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 3730 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); 3731 xpt_free_path(acb->ppath); 3732 xpt_bus_deregister(cam_sim_path(acb->psim)); 3733 cam_sim_free(acb->psim); 3734 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 3735 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3736 return (0); 3737 } 3738